HBASE-12603 Remove javadoc warnings introduced due to removal of unused imports (Varun Saxena)
parent b6b88edf93
commit 56a03d736a
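Every hunk below follows the same pattern: an import that was only kept alive by a javadoc {@link} reference was removed as unused, which left the link unresolvable and made javadoc emit a warning; the fix is to spell out the fully-qualified name inside the link itself. A minimal Java sketch of the pattern (ExampleCallable is a hypothetical class used only for illustration):

  // Before: the import existed only for the javadoc reference below. Once the
  // import is dropped as unused, javadoc can no longer resolve {@link Scan} here
  // and reports a warning.
  //
  // import org.apache.hadoop.hbase.client.Scan;
  //
  // /** Callable used while scanning. See {@link Scan#setMaxResultSize(long)}. */
  // public class ExampleCallable { }

  // After: the link is fully qualified, so no import is needed and the warning goes away.
  /**
   * Callable used while scanning.
   * See {@link org.apache.hadoop.hbase.client.Scan#setMaxResultSize(long)}.
   */
  public class ExampleCallable { }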
@@ -27,10 +27,14 @@ import java.lang.annotation.RetentionPolicy;
  * class or method not changing over time. Currently the stability can be
  * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
  *
- * <ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
- * a different InterfaceStability annotation states otherwise.</li>
+ * <ul><li>All classes that are annotated with
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.Public} or
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+ * must have InterfaceStability annotation. </li>
+ * <li>Classes that are
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+ * are to be considered unstable unless a different InterfaceStability annotation
+ * states otherwise.</li>
  * <li>Incompatible changes must not be made to classes marked as stable.</li>
  * </ul>
  */
@@ -54,7 +54,8 @@ public class ReversedScannerCallable extends ScannerCallable {
  * @param scan
  * @param scanMetrics
  * @param locateStartRow The start row for locating regions
- * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+ * @param rpcFactory to create an {@link com.google.protobuf.RpcController}
+ * to talk to the regionserver
  */
  public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
  ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory rpcFactory) {
@@ -68,7 +69,8 @@ public class ReversedScannerCallable extends ScannerCallable {
  * @param scan
  * @param scanMetrics
  * @param locateStartRow The start row for locating regions
- * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+ * @param rpcFactory to create an {@link com.google.protobuf.RpcController}
+ * to talk to the regionserver
  * @param replicaId the replica id
  */
  public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
@@ -79,7 +81,8 @@ public class ReversedScannerCallable extends ScannerCallable {

  /**
  * @deprecated use
- * {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan, ScanMetrics, byte[], RpcControllerFactory )}
+ * {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan,
+ * ScanMetrics, byte[], RpcControllerFactory )}
  */
  @Deprecated
  public ReversedScannerCallable(ClusterConnection connection, TableName tableName,
@@ -95,7 +95,8 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
  * @param scan the scan to execute
  * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
  * metrics
- * @param rpcControllerFactory factory to use when creating {@link RpcController}
+ * @param rpcControllerFactory factory to use when creating
+ * {@link com.google.protobuf.RpcController}
  */
  public ScannerCallable (ClusterConnection connection, TableName tableName, Scan scan,
  ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {
@@ -30,8 +30,9 @@ import com.google.protobuf.Message;
  * Defines how value for specific column is interpreted and provides utility
  * methods like compare, add, multiply etc for them. Takes column family, column
  * qualifier and return the cell value. Its concrete implementation should
- * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an
- * example.
+ * handle null case gracefully.
+ * Refer to {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}
+ * for an example.
  * <p>
  * Takes two generic parameters and three Message parameters.
  * The cell value type of the interpreter is <T>.
@@ -127,7 +128,8 @@ Q extends Message, R extends Message> {
  * server side to construct the ColumnInterpreter. The server
  * will pass this to the {@link #initialize}
  * method. If there is no ColumnInterpreter specific data (for e.g.,
- * {@link LongColumnInterpreter}) then null should be returned.
+ * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter})
+ * then null should be returned.
  * @return the PB message
  */
  public abstract P getRequestData();
@@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known column qualifier is looked for, use {@link Get#addColumn}
+ * If an already known column qualifier is looked for,
+ * use {@link org.apache.hadoop.hbase.client.Get#addColumn}
  * directly rather than a filter.
  */
  @InterfaceAudience.Public
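The javadoc advice in the hunk above deserves a concrete illustration: when the qualifier is already known, add it to the Get rather than filtering for it. A small hedged sketch (the row, family and qualifier names are made up):

  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class KnownQualifierExample {
    public static void main(String[] args) {
      // Known qualifier: request exactly that column on the Get, no filter needed.
      Get get = new Get(Bytes.toBytes("row1"));
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual1"));

      // A filter is only needed when the qualifier is not known up front,
      // e.g. "all qualifiers starting with 'qual'".
      Scan scan = new Scan();
      scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("qual")));
    }
  }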
@@ -40,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known row range needs to be scanned, use {@link Scan} start
+ * If an already known row range needs to be scanned,
+ * use {@link org.apache.hadoop.hbase.CellScanner} start
  * and stop rows directly rather than a filter.
  */
  @InterfaceAudience.Public
@@ -52,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * long value), then you can pass in your own comparator instead.
  * <p>
  * You must also specify a family and qualifier. Only the value of this column
- * will be tested. When using this filter on a {@link Scan} with specified
+ * will be tested. When using this filter on a
+ * {@link org.apache.hadoop.hbase.CellScanner} with specified
  * inputs, the column to be tested should also be added as input (otherwise
  * the filter will regard the column as missing).
  * <p>
@@ -36,8 +36,8 @@ import com.google.protobuf.ServiceException;

  /**
  * Base class which provides clients with an RPC connection to
- * call coprocessor endpoint {@link Service}s. Note that clients should not use this class
- * directly, except through
+ * call coprocessor endpoint {@link com.google.protobuf.Service}s.
+ * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}.
  */
  @InterfaceAudience.Public
@@ -23,7 +23,8 @@ import org.apache.hadoop.ipc.RemoteException;

  /**
  * A {@link RemoteException} with some extra information. If source exception
- * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true.
+ * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException},
+ * {@link #isDoNotRetry()} will return true.
  * <p>A {@link RemoteException} hosts exceptions we got from the server.
  */
  @SuppressWarnings("serial")
@@ -29,11 +29,11 @@ import com.google.protobuf.RpcController;
  /**
  * Used for server-side protobuf RPC service invocations. This handler allows
  * invocation exceptions to easily be passed through to the RPC server from coprocessor
- * {@link Service} implementations.
+ * {@link com.google.protobuf.Service} implementations.
  *
  * <p>
- * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
- * pattern to pass exceptions back to the RPC client:
+ * When implementing {@link com.google.protobuf.Service} defined methods,
+ * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
  * <code>
  * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
  * MyResponse response = null;
@@ -53,7 +53,8 @@ import com.google.protobuf.RpcController;
  public class ServerRpcController implements RpcController {
  /**
  * The exception thrown within
- * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
+ * {@link com.google.protobuf.Service#callMethod(
+ * Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
  * if any.
  */
  // TODO: it would be good widen this to just Throwable, but IOException is what we allow now
@@ -97,7 +98,7 @@ public class ServerRpcController implements RpcController {
  }

  /**
- * Sets an exception to be communicated back to the {@link Service} client.
+ * Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
  * @param ioe the exception encountered during execution of the service method
  */
  public void setFailedOn(IOException ioe) {
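The <code> fragment quoted in the ServerRpcController javadoc above is cut off mid-example. A hedged completion of the error-passing pattern it describes is sketched below; MyRequest and MyResponse stand in for generated protobuf messages, and using ResponseConverter.setControllerException is an assumption about how the exception is normally attached to the controller on the server side:

  // Inside a coprocessor endpoint Service implementation; on the server the
  // controller passed in is a ServerRpcController.
  public void myMethod(RpcController controller, MyRequest request,
      RpcCallback<MyResponse> done) {
    MyResponse response = null;
    try {
      // ... compute the real response here ...
      response = MyResponse.newBuilder().build();
    } catch (IOException ioe) {
      // Surface the failure to the RPC client instead of swallowing it.
      ResponseConverter.setControllerException(controller, ioe);
    }
    done.run(response);
  }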
@@ -281,7 +281,7 @@ public class PoolMap<K, V> implements Map<K, V> {

  /**
  * The <code>ReusablePool</code> represents a {@link PoolMap.Pool} that builds
- * on the {@link LinkedList} class. It essentially allows resources to be
+ * on the {@link java.util.LinkedList} class. It essentially allows resources to be
  * checked out, at which point it is removed from this pool. When the resource
  * is no longer required, it should be returned to the pool in order to be
  * reused.
@@ -28,13 +28,14 @@ import org.apache.hadoop.hbase.io.CellOutputStream;
  /**
  * Encoder/Decoder for Cell.
  *
- * <p>Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based
+ * <p>Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
+ * only Cell-based rather than KeyValue version 1 based
  * and without presuming an hfile context. Intent is an Interface that will work for hfile and
  * rpc.
  */
  @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
  public interface Codec {
- // TODO: interfacing with {@link DataBlockEncoder}
+ // TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
  /**
  * Call flush when done. Some encoders may not put anything on the stream until flush is called.
  * On flush, let go of any resources used by the encoder.
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * and flushes, or to build a byte[] to send to the client. This could be backed by a
  * List<KeyValue>, but more efficient implementations will append results to a
  * byte[] to eliminate overhead, and possibly encode the cells further.
- * <p>To read Cells, use {@link CellScanner}
- * @see CellScanner
+ * <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
+ * @see org.apache.hadoop.hbase.CellScanner
  */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
@@ -32,7 +32,8 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * qualifiers.
  * </p>
  * <p>
- * {@code DataType}s are different from Hadoop {@link Writable}s in two
+ * {@code DataType}s are different from Hadoop
+ * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable}s in two
  * significant ways. First, {@code DataType} describes how to serialize a
  * value, it does not encapsulate a serialized value. Second, {@code DataType}
  * implementations provide hints to consumers about relationships between the
@@ -24,9 +24,11 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;

  /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it
- * easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(
+ * byte[], int, byte[], int, int)}. Intended to make it
+ * easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  * @see OrderedBlobVar
@@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;

  /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a
- * termination marker following the raw {@code byte[]} value. Intended to
- * make it easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+ * Includes a termination marker following the raw {@code byte[]} value. Intended to make it easier
+ * to transition away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  */
@@ -23,10 +23,11 @@ import org.apache.hadoop.hbase.util.Order;

  /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#toBytes(String)}. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Intended to make it easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  */
  @InterfaceAudience.Public
@@ -23,11 +23,12 @@ import org.apache.hadoop.hbase.util.Order;

  /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#toBytes(String)}. Includes a termination marker following the
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Includes a termination marker following the
  * raw {@code byte[]} value. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  * @see OrderedString
  */
@@ -35,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
  public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
  PositionedByteRange {
  /**
- * The current index into the range. Like {@link ByteBuffer} position, it
+ * The current index into the range. Like {@link java.nio.ByteBuffer} position, it
  * points to the next value that will be read/written in the array. It
  * provides the appearance of being 0-indexed, even though its value is
  * calculated according to offset.
@@ -182,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
  @Override
  public abstract int putVLong(long val);
  /**
- * Similar to {@link ByteBuffer#flip()}. Sets length to position, position to
+ * Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to
  * offset.
  */
  @VisibleForTesting
@@ -194,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
  }

  /**
- * Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to
+ * Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to
  * capacity.
  */
  @VisibleForTesting
@@ -33,7 +33,7 @@ import com.google.common.base.Supplier;

  /**
  * A simple concurrent map of sets. This is similar in concept to
- * {@link Multiset}, with the following exceptions:
+ * {@link com.google.common.collect.Multiset}, with the following exceptions:
  * <ul>
  * <li>The set is thread-safe and concurrent: no external locking or
  * synchronization is required. This is important for the use case where
@@ -109,7 +109,7 @@ public class ConcurrentIndex<K, V> {
  * associated. <b>Note:</b> if the caller wishes to add or removes values
  * to under the specified as they're iterating through the returned value,
  * they should make a defensive copy; otherwise, a
- * {@link ConcurrentModificationException} may be thrown.
+ * {@link java.util.ConcurrentModificationException} may be thrown.
  * @param key The key
  * @return All values associated with the specified key or null if no values
  * are associated with the key.
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * their own index into the array.
  * </p>
  * <p>
- * Designed to be a slimmed-down, mutable alternative to {@link ByteBuffer}.
+ * Designed to be a slimmed-down, mutable alternative to {@link java.nio.ByteBuffer}.
  * </p>
  */
  @InterfaceAudience.Public
@@ -51,7 +51,8 @@ import org.apache.zookeeper.ZooKeeper;
  * This is an example showing how a RegionObserver could configured
  * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
  *
- * This also demonstrated the use of shared {@link RegionObserver} state.
+ * This also demonstrated the use of shared
+ * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
  * See {@link RegionCoprocessorEnvironment#getSharedData()}.
  *
  * This would be useful for an incremental backup tool, which would indicate the last
@@ -59,7 +60,8 @@ import org.apache.zookeeper.ZooKeeper;
  * inserted since (based on wall clock time).
  *
  * This implements org.apache.zookeeper.Watcher directly instead of using
- * {@link ZooKeeperWatcher}, because RegionObservers come and go and currently
+ * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher},
+ * because RegionObservers come and go and currently
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
  public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
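For context on the observer changed above: a RegionObserver such as ZooKeeperScanPolicyObserver is typically attached per table through its descriptor. A hedged sketch (the table name is made up; the descriptor would then be passed to Admin.createTable):

  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;

  public class AttachObserverExample {
    public static void main(String[] args) throws Exception {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
      // Register the coprocessor by its fully-qualified class name; the jar must
      // be available on the region server classpath.
      desc.addCoprocessor(
          "org.apache.hadoop.hbase.coprocessor.example.ZooKeeperScanPolicyObserver");
    }
  }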
@@ -35,9 +35,11 @@ import org.apache.zookeeper.KeeperException;
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the {@link TimeToLiveHFileCleaner} is also enabled (it always should
- * be), since it may take a little time for the ZK notification to propagate, in which case we may
- * accidentally delete some files.
+ * This only works properly if the
+ * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
+ * is also enabled (it always should be), since it may take a little time
+ * for the ZK notification to propagate, in which case we may accidentally
+ * delete some files.
  */
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
  public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
@@ -49,7 +49,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this scanner can be used to
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
+ * to a pure-hdfs cluster, and this scanner can be used to
  * run the scan directly over the snapshot files. The snapshot should not be deleted while there
  * are open scanners reading from snapshot files.
  *
@@ -65,7 +66,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * snapshot files, the job has to be run as the HBase user or the user must have group or other
  * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from
  * snapshot/data files will completely circumvent the access control enforced by HBase.
- * @see TableSnapshotInputFormat
+ * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
  */
  @InterfaceAudience.Public
  @InterfaceStability.Evolving
@@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.constraint;
  import org.apache.hadoop.hbase.classification.InterfaceAudience;

  /**
- * Exception that a user defined constraint throws on failure of a {@link Put}.
- * <p>
- * Does <b>NOT</b> attempt the {@link Put} multiple times, since the constraint
- * <it>should</it> fail every time for the same {@link Put} (it should be
+ * Exception that a user defined constraint throws on failure of a
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ * <p>Does <b>NOT</b> attempt the
+ * {@link org.apache.hadoop.hbase.client.Put} multiple times,
+ * since the constraint <it>should</it> fail every time for
+ * the same {@link org.apache.hadoop.hbase.client.Put} (it should be
  * idempotent).
  */
  @InterfaceAudience.Private
@@ -42,11 +42,11 @@ import com.google.common.annotations.VisibleForTesting;
  * <P>
  * Methods required for task life circle: <BR>
  * {@link #markRegionsRecovering(ServerName, Set)} mark regions for log replaying. Used by
- * {@link MasterFileSystem} <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} <BR>
  * {@link #removeRecoveringRegions(Set, Boolean)} make regions cleanup that previous were marked as
  * recovering. Called after all tasks processed <BR>
  * {@link #removeStaleRecoveringRegions(Set)} remove stale recovering. called by
- * {@link MasterFileSystem} after Active Master is initialized <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} after Active Master is initialized <BR>
  * {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
  * recovery has been made<BR>
  * {@link #checkTaskStillAvailable(String)} Check that task is still there <BR>
@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
  import com.google.common.annotations.VisibleForTesting;

  /**
- * Coordinated operations for {@link SplitLogWorker} and {@link WALSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and
+ * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important
  * methods for SplitLogWorker: <BR>
  * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
  * ready to supply the tasks <BR>
@@ -67,7 +67,8 @@ import org.apache.zookeeper.ZooDefs.Ids;
  import org.apache.zookeeper.data.Stat;

  /**
- * ZooKeeper based implementation of {@link SplitLogManagerCoordination}
+ * ZooKeeper based implementation of
+ * {@link org.apache.hadoop.hbase.master.SplitLogManagerCoordination}
  */
  @InterfaceAudience.Private
  public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@@ -682,7 +683,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements

  /**
  * ZooKeeper implementation of
- * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+ * {@link org.apache.hadoop.hbase.master.
+ * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
  */
  @Override
  public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
@@ -904,9 +906,10 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements


  /**
- * {@link SplitLogManager} can use objects implementing this interface to finish off a partially
- * done task by {@link SplitLogWorker}. This provides a serialization point at the end of the task
- * processing. Must be restartable and idempotent.
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this
+ * interface to finish off a partially done task by
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a
+ * serialization point at the end of the task processing. Must be restartable and idempotent.
  */
  public interface TaskFinisher {
  /**
@@ -1067,7 +1070,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
  * Asynchronous handler for zk create RESCAN-node results. Retries on failures.
  * <p>
  * A RESCAN node is created using PERSISTENT_SEQUENTIAL flag. It is a signal for all the
- * {@link SplitLogWorker}s to rescan for new tasks.
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}s to rescan for new tasks.
  */
  public class CreateRescanAsyncCallback implements AsyncCallback.StringCallback {
  private final Log LOG = LogFactory.getLog(CreateRescanAsyncCallback.class);
@@ -583,8 +583,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
  * Next part is related to WALSplitterHandler
  */
  /**
- * endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
- * timeout the task node.
+ * endTask() can fail and the only way to recover out of it is for the
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
  * @param slt
  * @param ctr
  */
@@ -212,8 +212,9 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
- * scanner, applying its own policy to what gets written.</li>
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+ * policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
  * bypassing core compaction using this approach must write out new store files themselves or the
@@ -238,8 +239,9 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
- * scanner, applying its own policy to what gets written.</li>
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+ * policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
  * bypassing core compaction using this approach must write out new store files themselves or the
@@ -269,7 +271,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files
@@ -293,7 +296,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files
@@ -339,9 +339,9 @@ public class HFileBlock implements Cacheable {
  /**
  * Returns the buffer this block stores internally. The clients must not
  * modify the buffer object. This method has to be public because it is
- * used in {@link CompoundBloomFilter} to avoid object creation on every
- * Bloom filter lookup, but has to be used with caution. Checksum data
- * is not included in the returned buffer but header data is.
+ * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} to avoid object
+ * creation on every Bloom filter lookup, but has to be used with caution.
+ * Checksum data is not included in the returned buffer but header data is.
  *
  * @return the buffer of this block for read-only operations
  */
@@ -354,7 +354,7 @@ public class HFileBlock implements Cacheable {
  /**
  * Returns the buffer of this block, including header data. The clients must
  * not modify the buffer object. This method has to be public because it is
- * used in {@link BucketCache} to avoid buffer copy.
+ * used in {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} to avoid buffer copy.
  *
  * @return the buffer with header and checksum included for read-only operations
  */
@@ -54,9 +54,9 @@ import org.apache.hadoop.util.StringUtils;
  * ({@link BlockIndexReader}) single-level and multi-level block indexes.
  *
  * Examples of how to use the block index writer can be found in
- * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
- * to use the reader can be found in {@link HFileReaderV2} and
- * TestHFileBlockIndex.
+ * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter} and
+ * {@link HFileWriterV2}. Examples of how to use the reader can be
+ * found in {@link HFileReaderV2} and TestHFileBlockIndex.
  */
  @InterfaceAudience.Private
  public class HFileBlockIndex {
@@ -131,7 +131,7 @@ public interface HFileScanner {
  */
  ByteBuffer getValue();
  /**
- * @return Instance of {@link KeyValue}.
+ * @return Instance of {@link org.apache.hadoop.hbase.KeyValue}.
  */
  Cell getKeyValue();
  /**
@@ -57,16 +57,16 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
  * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
  *
- * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
- * in-memory column family is a column family that should be served from memory if possible):
+ * Contains three levels of block priority to allow for scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column
+ * family is a column family that should be served from memory if possible):
  * single-access, multiple-accesses, and in-memory priority.
  * A block is added with an in-memory priority flag if
- * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
- * priority the first time it is read into this block cache. If a block is accessed again while
- * in cache, it is marked as a multiple access priority block. This delineation of blocks is used
- * to prevent scans from thrashing the cache adding a least-frequently-used
- * element to the eviction algorithm.<p>
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a
+ * single access priority the first time it is read into this block cache. If a block is
+ * accessed again while in cache, it is marked as a multiple access priority block. This
+ * delineation of blocks is used to prevent scans from thrashing the cache adding a
+ * least-frequently-used element to the eviction algorithm.<p>
  *
  * Each priority is given its own chunk of the total cache to ensure
  * fairness during eviction. Each priority will retain close to its maximum
@@ -82,8 +82,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
  *
  * <p>BucketCache can be used as mainly a block cache (see
- * {@link CombinedBlockCache}), combined with LruBlockCache to decrease CMS GC and
- * heap fragmentation.
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
+ * LruBlockCache to decrease CMS GC and heap fragmentation.
  *
  * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
  * blocks) to enlarge cache space via
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
  /**
  * A job with a map to count rows.
  * Map outputs table rows IF the input row has columns that have content.
- * Uses an {@link IdentityReducer}
+ * Uses a org.apache.hadoop.mapred.lib.IdentityReducer
  */
  @InterfaceAudience.Public
  @InterfaceStability.Stable
@@ -103,9 +103,11 @@ implements InputFormat<ImmutableBytesWritable, Result> {
  * Calculates the splits that will serve as input for the map tasks.
  * <ul>
  * Splits are created in number equal to the smallest between numSplits and
- * the number of {@link HRegion}s in the table. If the number of splits is
- * smaller than the number of {@link HRegion}s then splits are spanned across
- * multiple {@link HRegion}s and are grouped the most evenly possible. In the
+ * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
+ * If the number of splits is smaller than the number of
+ * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
+ * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
+ * and are grouped the most evenly possible. In the
  * case splits are uneven the bigger splits are placed first in the
  * {@link InputSplit} array.
  *
@@ -59,7 +59,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
  }

  /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
  public void setHTable(Table htable) {
  this.recordReaderImpl.setHTable(htable);
@@ -113,7 +113,7 @@ public class TableRecordReaderImpl {
  return this.startRow;
  }
  /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
  public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
@@ -32,7 +32,8 @@ import org.apache.hadoop.mapreduce.RecordReader;
  import org.apache.hadoop.mapreduce.TaskAttemptContext;

  /**
- * Simple {@link InputFormat} for {@link WAL} files.
+ * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL}
+ * files.
  * @deprecated use {@link WALInputFormat}
  */
  @Deprecated
@@ -59,9 +59,9 @@ extends TableReducer<Writable, Mutation, Writable> {

  /**
  * Writes each given record, consisting of the row key and the given values,
- * to the configured {@link OutputFormat}. It is emitting the row key and each
- * {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
+ * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}.
+ * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put}
+ * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
  *
  * @param key The current row key.
  * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or
@@ -119,7 +119,7 @@ public class TableRecordReaderImpl {
  /**
  * Sets the HBase table.
  *
- * @param htable The {@link HTable} to scan.
+ * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
  public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
@@ -49,9 +49,9 @@ import com.google.common.annotations.VisibleForTesting;
  * wals, etc) directly to provide maximum performance. The snapshot is not required to be
  * restored to the live cluster or cloned. This also allows to run the mapreduce job from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to
- * run the mapreduce job directly over the snapshot files. The snapshot should not be deleted
- * while there are jobs reading from snapshot files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
+ * and this InputFormat can be used to run the mapreduce job directly over the snapshot files.
+ * The snapshot should not be deleted while there are jobs reading from snapshot files.
  * <p>
  * Usage is similar to TableInputFormat, and
  * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
@@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>
  * Internally, this input format restores the snapshot into the given tmp directory. Similar to
  * {@link TableInputFormat} an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the {@link Scan} obtained
- * from the user.
+ * from each RecordReader. An internal RegionScanner is used to execute the
+ * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user.
  * <p>
  * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
  * snapshot files and data files.
@@ -79,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting;
  * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
  * Note that, given other users access to read from snapshot/data files will completely circumvent
  * the access control enforced by HBase.
- * @see TableSnapshotScanner
+ * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
  */
  @InterfaceAudience.Public
  @InterfaceStability.Evolving
@@ -150,7 +150,7 @@ import com.google.protobuf.Service;
  *
  * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
  *
- * @see Watcher
+ * @see org.apache.zookeeper.Watcher
  */
  @InterfaceAudience.Private
  @SuppressWarnings("deprecation")
@@ -74,8 +74,9 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>SplitLogManager monitors the tasks that it creates using the
  * timeoutMonitor thread. If a task's progress is slow then
  * {@link SplitLogManagerCoordination#checkTasks} will take away the
- * task from the owner {@link SplitLogWorker} and the task will be up for grabs again. When the
- * task is done then it is deleted by SplitLogManager.
+ * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
+ * and the task will be up for grabs again. When the task is done then it is
+ * deleted by SplitLogManager.
  *
  * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
  * log files. The caller thread waits in this method until all the log files
@@ -60,8 +60,9 @@ import com.google.common.collect.Sets;

  /**
  * The base class for load balancers. It provides the the functions used to by
- * {@link AssignmentManager} to assign regions in the edge cases. It doesn't
- * provide an implementation of the actual balancing algorithm.
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
+ * in the edge cases. It doesn't provide an implementation of the
+ * actual balancing algorithm.
  *
  */
  public abstract class BaseLoadBalancer implements LoadBalancer {
@@ -41,16 +41,17 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position;
  import org.apache.hadoop.hbase.util.Pair;

  /**
- * An implementation of the {@link LoadBalancer} that assigns favored nodes for
- * each region. There is a Primary RegionServer that hosts the region, and then
- * there is Secondary and Tertiary RegionServers. Currently, the favored nodes
- * information is used in creating HDFS files - the Primary RegionServer passes
- * the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
- * API for creating files on the filesystem. These nodes are treated as hints by
- * the HDFS to place the blocks of the file. This alleviates the problem to do with
- * reading from remote nodes (since we can make the Secondary RegionServer as the new
- * Primary RegionServer) after a region is recovered. This should help provide consistent
- * read latencies for the regions even when their primary region servers die.
+ * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that
+ * assigns favored nodes for each region. There is a Primary RegionServer that hosts
+ * the region, and then there is Secondary and Tertiary RegionServers. Currently, the
+ * favored nodes information is used in creating HDFS files - the Primary RegionServer
+ * passes the primary, secondary, tertiary node addresses as hints to the
+ * DistributedFileSystem API for creating files on the filesystem. These nodes are
+ * treated as hints by the HDFS to place the blocks of the file. This alleviates the
+ * problem to do with reading from remote nodes (since we can make the Secondary
+ * RegionServer as the new Primary RegionServer) after a region is recovered. This
+ * should help provide consistent read latencies for the regions even when their
+ * primary region servers die.
  *
  */
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@@ -51,7 +51,8 @@ import com.google.common.collect.MinMaxPriorityQueue;
  * <p>On cluster startup, bulk assignment can be used to determine
  * locations for all Regions in a cluster.
  *
- * <p>This classes produces plans for the {@link AssignmentManager} to execute.
+ * <p>This classes produces plans for the
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to execute.
  */
  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
  public class SimpleLoadBalancer extends BaseLoadBalancer {
@@ -33,9 +33,10 @@ import org.apache.hadoop.hbase.master.MasterServices;
  /**
  * Handle logReplay work from SSH. Having a separate handler is not to block SSH in re-assigning
  * regions from dead servers. Otherwise, available SSH handlers could be blocked by logReplay work
- * (from {@link MasterFileSystem#splitLog(ServerName)}). During logReplay, if a receiving RS(say A)
- * fails again, regions on A won't be able to be assigned to another live RS which causes the log
- * replay unable to complete because WAL edits replay depends on receiving RS to be live
+ * (from {@link org.apache.hadoop.hbase.master.MasterFileSystem#splitLog(ServerName)}).
+ * During logReplay, if a receiving RS(say A) fails again, regions on A won't be able
+ * to be assigned to another live RS which causes the log replay unable to complete
+ * because WAL edits replay depends on receiving RS to be live
  */
  @InterfaceAudience.Private
  public class LogReplayHandler extends EventHandler {
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HRegionInfo;

  /**
  * Implementors tote an HRegionInfo instance.
- * This is a marker interface that can be put on {@link EventHandler}s that
+ * This is a marker interface that can be put on {@link java.beans.EventHandler}s that
  * have an {@link HRegionInfo}.
  */
  @InterfaceAudience.Private
@@ -98,7 +98,8 @@ public final class MasterSnapshotVerifier {
  /**
  * Verify that the snapshot in the directory is a valid snapshot
  * @param snapshotDir snapshot directory to check
- * @param snapshotServers {@link ServerName} of the servers that are involved in the snapshot
+ * @param snapshotServers {@link org.apache.hadoop.hbase.ServerName} of the servers
+ * that are involved in the snapshot
  * @throws CorruptedSnapshotException if the snapshot is invalid
  * @throws IOException if there is an unexpected connection issue to the filesystem
  */
@@ -34,20 +34,23 @@ import org.apache.zookeeper.KeeperException;
  *
  * To implement a custom globally barriered procedure, user needs to extend two classes:
  * {@link MasterProcedureManager} and {@link RegionServerProcedureManager}. Implementation of
- * {@link MasterProcedureManager} is loaded into {@link HMaster} process via configuration
- * parameter 'hbase.procedure.master.classes', while implementation of
- * {@link RegionServerProcedureManager} is loaded into {@link HRegionServer} process via
+ * {@link MasterProcedureManager} is loaded into {@link org.apache.hadoop.hbase.master.HMaster}
+ * process via configuration parameter 'hbase.procedure.master.classes', while implementation of
+ * {@link RegionServerProcedureManager} is loaded into
+ * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process via
  * configuration parameter 'hbase.procedure.regionserver.classes'.
  *
- * An example of globally barriered procedure implementation is {@link SnapshotManager} and
- * {@link RegionServerSnapshotManager}.
+ * An example of globally barriered procedure implementation is
+ * {@link org.apache.hadoop.hbase.master.snapshot.SnapshotManager} and
+ * {@link org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager}.
  *
  * A globally barriered procedure is identified by its signature (usually it is the name of the
  * procedure znode). During the initialization phase, the initialize methods are called by both
- * {@link HMaster} and {@link HRegionServer} witch create the procedure znode and register the
- * listeners. A procedure can be triggered by its signature and an instant name (encapsulated in
- * a {@link ProcedureDescription} object). When the servers are shutdown, the stop methods on both
- * classes are called to clean up the data associated with the procedure.
+ * {@link org.apache.hadoop.hbase.master.HMaster}
+ * and {@link org.apache.hadoop.hbase.regionserver.HRegionServer} which create the procedure znode
+ * and register the listeners. A procedure can be triggered by its signature and an instant name
+ * (encapsulated in a {@link ProcedureDescription} object). When the servers are shutdown,
+ * the stop methods on both classes are called to clean up the data associated with the procedure.
  */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
@@ -27,8 +27,8 @@ import org.apache.zookeeper.KeeperException;

  /**
  * Provides the globally barriered procedure framework and environment for
- * master oriented operations. {@link HMaster} interacts with the loaded
- * procedure manager through this class.
+ * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster}
+ * interacts with the loaded procedure manager through this class.
  */
  public class MasterProcedureManagerHost extends
  ProcedureManagerHost<MasterProcedureManager> {
@@ -29,7 +29,8 @@ import org.apache.zookeeper.KeeperException;

  /**
  * Provides the globally barriered procedure framework and environment
- * for region server oriented operations. {@link HRegionServer} interacts
+ * for region server oriented operations.
+ * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} interacts
  * with the loaded procedure manager through this class.
  */
  public class RegionServerProcedureManagerHost extends
@@ -23,8 +23,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
  /**
  * Wraps together the mutations which are applied as a batch to the region and their operation
  * status and WALEdits.
- * @see RegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress)
- * @see RegionObserver#postBatchMutate(ObserverContext, MiniBatchOperationInProgress)
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
+ * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
+ * ObserverContext, MiniBatchOperationInProgress)
  * @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids .
  */
  @InterfaceAudience.Private
@@ -61,7 +63,8 @@ public class MiniBatchOperationInProgress<T> {

  /**
  * Sets the status code for the operation(Mutation) at the specified position.
- * By setting this status, {@link RegionObserver} can make HRegion to skip Mutations.
+ * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
+ * can make HRegion to skip Mutations.
  * @param index
  * @param opStatus
  */
@@ -54,7 +54,8 @@ public interface RegionScanner extends InternalScanner {
  boolean reseek(byte[] row) throws IOException;

  /**
- * @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)}
+ * @return The preferred max buffersize. See
+ * {@link org.apache.hadoop.hbase.client.Scan#setMaxResultSize(long)}
  */
  long getMaxResultSize();

@@ -45,9 +45,9 @@ import com.google.common.annotations.VisibleForTesting;

  /**
  * This worker is spawned in every regionserver, including master. The Worker waits for log
- * splitting tasks to be put up by the {@link SplitLogManager} running in the master and races with
- * other workers in other serves to acquire those tasks. The coordination is done via coordination
- * engine.
+ * splitting tasks to be put up by the {@link org.apache.hadoop.hbase.master.SplitLogManager}
+ * running in the master and races with other workers in other serves to acquire those tasks.
+ * The coordination is done via coordination engine.
  * <p>
  * If a worker has successfully moved the task from state UNASSIGNED to OWNED then it owns the task.
  * It keeps heart beating the manager by periodically moving the task from UNASSIGNED to OWNED
@@ -186,7 +186,8 @@ public class SplitLogWorker implements Runnable {
  * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
  * guarantee that two workers will not be executing the same task therefore it
  * is better to have workers prepare the task and then have the
- * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} commit the work in
+ * SplitLogManager.TaskFinisher
  */
  public interface TaskExecutor {
  enum Status {
@@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.S
  import org.apache.hadoop.ipc.RemoteException;

  /**
- * A {@link ReplicationEndpoint} implementation for replicating to another HBase cluster.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
+ * implementation for replicating to another HBase cluster.
  * For the slave cluster it selects a random number of peers
  * using a replication ratio. For example, if replication ration = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
@@ -79,8 +79,9 @@ import com.google.common.cache.CacheBuilder;
  import com.google.protobuf.ServiceException;

  /**
- * A {@link ReplicationEndpoint} endpoint which receives the WAL edits from the
- * WAL, and sends the edits to replicas of regions.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
+ * which receives the WAL edits from the WAL, and sends the edits to replicas
+ * of regions.
  */
  @InterfaceAudience.Private
  public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType;

  /**
  * Handles Bloom filter initialization based on configuration and serialized
- * metadata in the reader and writer of {@link StoreFile}.
+ * metadata in the reader and writer of {@link org.apache.hadoop.hbase.regionserver.StoreFile}.
  */
  @InterfaceAudience.Private
  public final class BloomFilterFactory {
@@ -1706,7 +1706,7 @@ public abstract class FSUtils {
  * This function is to scan the root path of the file system to get the
  * degree of locality for each region on each of the servers having at least
  * one block of that region.
- * This is used by the tool {@link RegionPlacementMaintainer}
+ * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer}
  *
  * @param conf
  * the configuration to use
@@ -134,7 +134,8 @@ public class HFileArchiveUtil {
  }

  /**
- * Get the full path to the archive directory on the configured {@link FileSystem}
+ * Get the full path to the archive directory on the configured
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
  * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
  * testing: requires a FileSystem root directory to be specified.
  * @return the full {@link Path} to the archive directory, as defined by the configuration
@@ -145,7 +146,8 @@ public class HFileArchiveUtil {
  }

  /**
- * Get the full path to the archive directory on the configured {@link FileSystem}
+ * Get the full path to the archive directory on the configured
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
  * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
  * the archive path)
  * @return the full {@link Path} to the archive directory, as defined by the configuration
@@ -28,9 +28,9 @@ import org.apache.zookeeper.KeeperException;

  /**
  * Tracker on cluster settings up in zookeeper.
- * This is not related to {@link ClusterStatus}. That class is a data structure
- * that holds snapshot of current view on cluster. This class is about tracking
- * cluster attributes up in zookeeper.
+ * This is not related to {@link org.apache.hadoop.hbase.ClusterStatus}. That class
+ * is a data structure that holds snapshot of current view on cluster. This class
+ * is about tracking cluster attributes up in zookeeper.
  *
  */
  @InterfaceAudience.Private
@@ -35,7 +35,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSeq
  import org.apache.zookeeper.KeeperException;

  /**
- * Common methods and attributes used by {@link SplitLogManager} and {@link SplitLogWorker}
+ * Common methods and attributes used by {@link org.apache.hadoop.hbase.master.SplitLogManager}
+ * and {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
  * running distributed splitting of WAL logs.
  */
  @InterfaceAudience.Private