HBASE-18687 Add @since 2.0.0 to new classes
This commit is contained in:
parent
f74cf679ec
commit
3e1c598d8e
|
@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
/**
|
||||
* The asynchronous meta table accessor. Used to read/write region and assignment information store
|
||||
* in <code>hbase:meta</code>.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class AsyncMetaTableAccessor {
|
||||
|
|
|
@ -52,6 +52,7 @@ import com.google.protobuf.RpcChannel;
|
|||
* <p>
|
||||
* This feature is still under development, so marked as IA.Private. Will change to public when
|
||||
* done. Use it with caution.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncAdmin {
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
* For creating {@link AsyncAdmin}. The implementation should have default configurations set before
|
||||
* returning the builder to user. So users are free to only set the configs they care about to
|
||||
* create a new AsyncAdmin instance.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncAdminBuilder {
|
||||
|
|
|
@ -26,6 +26,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
|
||||
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
|
||||
|
||||
/**
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class AsyncAdminRequestRetryingCaller<T> extends AsyncRpcRetryingCaller<T> {
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
|
||||
/**
|
||||
* The asynchronous version of Connection.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncConnection extends Closeable {
|
||||
|
|
|
@ -58,6 +58,7 @@ import com.google.protobuf.RpcChannel;
|
|||
|
||||
/**
|
||||
* The implementation of AsyncAdmin.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class AsyncHBaseAdmin implements AsyncAdmin {
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterServ
|
|||
|
||||
/**
|
||||
* Retry caller for a request call to master.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class AsyncMasterRequestRpcRetryingCaller<T> extends AsyncRpcRetryingCaller<T> {
|
||||
|
|
|
@ -28,7 +28,8 @@ import java.util.List;
|
|||
* 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
|
||||
* then errors and failed operations in this object will reflect global errors.
|
||||
* 2) If submit call is made with needResults false, results will not be saved.
|
||||
 *
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public interface AsyncRequestFuture {
|
||||
public boolean hasError();
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon
|
|||
|
||||
/**
|
||||
* Factory to create an AsyncRpcRetryCaller.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class AsyncRpcRetryingCallerFactory {
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
* The implementation should make sure that user can do everything they want to the returned
|
||||
* {@code CompletableFuture} without breaking anything. Usually the implementation will require user
|
||||
* to provide a {@code ExecutorService}.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncTable extends AsyncTableBase {
|
||||
|
|
|
@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
* <p>
|
||||
* Usually the implementation will not throw any exception directly. You need to get the exception
|
||||
* from the returned {@link CompletableFuture}.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncTableBase {
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
* The implementation should have default configurations set before returning the builder to user.
|
||||
* So users are free to only set the configs they care about to create a new
|
||||
* AsyncTable/RawAsyncTable instance.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncTableBuilder<T extends AsyncTableBase> {
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
* <p>
|
||||
* Usually the implementations will not throw any exception directly, you need to get the exception
|
||||
* from the returned {@link CompletableFuture}.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface AsyncTableRegionLocator {
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
* <p>
|
||||
* If user setBatch(5) and rpc returns 3+5+5+5+3 cells, we should return 5+5+5+5+1 to user. setBatch
|
||||
* doesn't mean setAllowPartialResult(true).
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class BatchScanResultCache implements ScanResultCache {
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
* It is used as input when creating a table or adding a column.
|
||||
*
|
||||
* To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface ColumnFamilyDescriptor {
|
||||
|
|
|
@ -41,6 +41,9 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
import org.apache.hadoop.hbase.util.PrettyPrinter;
|
||||
import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
|
||||
|
||||
/**
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public class ColumnFamilyDescriptorBuilder {
|
||||
// For future backward compatibility
|
||||
|
|
|
@ -255,6 +255,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
|
||||
/**
|
||||
* The implementation of AsyncAdmin.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class RawAsyncHBaseAdmin implements AsyncAdmin {
|
||||
|
|
|
@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
* method. The {@link RawScanResultConsumer} exposes the implementation details of a scan(heartbeat)
|
||||
* so it is not suitable for a normal user. If it is still the only difference after we implement
|
||||
* most features of AsyncTable, we can think about merge these two interfaces.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface RawAsyncTable extends AsyncTableBase {
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
|
|||
* HBase in background while you process the returned data, you need to move the processing work to
|
||||
* another thread to make the {@code onNext} call return immediately. And please do NOT do any time
|
||||
* consuming tasks in all methods below unless you know what you are doing.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public interface RawScanResultConsumer {
|
||||
|
|
|
@ -47,6 +47,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
|
|||
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public class TableDescriptorBuilder {
|
||||
public static final Log LOG = LogFactory.getLog(TableDescriptorBuilder.class);
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
|
||||
/**
|
||||
* Netty client for the requests and responses.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
|
||||
public class NettyRpcClient extends AbstractRpcClient<NettyRpcConnection> {
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
* As hadoop Configuration can not pass an Object directly, we need to find a way to pass the
|
||||
* EventLoopGroup to {@code AsyncRpcClient} if we want to use a single {@code EventLoopGroup} for
|
||||
* the whole process.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
public class NettyRpcClientConfigHelper {
|
||||
|
|
|
@ -65,6 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation;
|
|||
* <p>
|
||||
* Most operations are executed in handlers. Netty handler is always executed in the same
|
||||
* thread(EventLoop) so no lock is needed.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyRpcConnection extends RpcConnection {
|
||||
|
|
|
@ -48,6 +48,7 @@ import org.apache.hadoop.ipc.RemoteException;
|
|||
|
||||
/**
|
||||
* The netty rpc handler.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyRpcDuplexHandler extends ChannelDuplexHandler {
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
|
|||
|
||||
/**
|
||||
* Implement logic to deal with the rpc connection header.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHandler<ByteBuf> {
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
|
|||
|
||||
/**
|
||||
* Implement SASL logic for netty rpc client.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
|
|||
|
||||
/**
|
||||
* Implement SASL logic for netty rpc client.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler<ByteBuf> {
|
||||
|
|
|
@ -57,6 +57,7 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
|
|||
|
||||
/**
|
||||
* An RPC server with Netty4 implementation.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class NettyRpcServer extends RpcServer {
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
|
||||
/**
|
||||
* Handle connection preamble.
|
||||
 * @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyRpcServerPreambleHandler extends SimpleChannelInboundHandler<ByteBuf> {
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
|
||||
/**
|
||||
* Decoder for rpc request.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyRpcServerRequestDecoder extends ChannelInboundHandlerAdapter {
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
|
||||
/**
|
||||
* Encoder for {@link RpcResponse}.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyRpcServerResponseEncoder extends ChannelOutboundHandlerAdapter {
|
||||
|
|
|
@ -33,6 +33,7 @@ import org.apache.htrace.TraceInfo;
|
|||
/**
|
||||
* Datastructure that holds all necessary to a method invocation and then afterward, carries the
|
||||
* result.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyServerCall extends ServerCall<NettyServerRpcConnection> {
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.htrace.TraceInfo;
|
|||
|
||||
/**
|
||||
* RpcConnection implementation for netty rpc server.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
class NettyServerRpcConnection extends ServerRpcConnection {
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.Pair;
|
|||
|
||||
/**
|
||||
* Helper class for passing netty event loop config to {@link AsyncFSWALProvider}.
|
||||
* @since 2.0.0
|
||||
*/
|
||||
public class NettyAsyncFSWALConfigHelper {
|
||||
|
||||
|
|
Loading…
Reference in New Issue