HBASE-11891 Introduce an HBaseInterfaceAudience level to denote class names that appear in configs.

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Author: Sean Busbey, 2014-09-03 23:23:16 -05:00 (committed by Andrew Purtell)
parent 8b5582370e
commit 39609a556a
42 changed files with 92 additions and 40 deletions
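
Note on scope: every class touched below can be named, verbatim, in a user's hbase-site.xml and is then loaded reflectively, so its fully qualified name is effectively a public interface even though the class itself is private. A minimal sketch of the pattern (the key "some.plugin.class" is a placeholder, not a real HBase key):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfigClassLoading {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The config supplies a class name; HBase instantiates it by that exact
        // name, so renaming or repackaging the class breaks existing configs.
        String className = conf.get("some.plugin.class", "java.util.ArrayList");
        Object plugin = Class.forName(className).newInstance();
        System.out.println("Loaded " + plugin.getClass().getName());
      }
    }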

View File

@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
@@ -176,6 +177,7 @@ class ClusterStatusListener implements Closeable {
   /**
    * An implementation using a multicast message between the master & the client.
    */
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
   class MulticastListener implements Listener {
     private DatagramChannel channel;
     private final EventLoopGroup group = new NioEventLoopGroup(

View File

@@ -29,4 +29,8 @@ public class HBaseInterfaceAudience {
   public static final String COPROC = "Coprocesssor";
   public static final String REPLICATION = "Replication";
   public static final String PHOENIX = "Phoenix";
+  /**
+   * Denotes class names that appear in user facing configuration files.
+   */
+  public static final String CONFIG = "Configuration";
 }

View File

@@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -32,7 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * delimiting all lengths. Profligate. Needs tune up.
  * Note: This will not write tags of a Cell.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class CellCodec implements Codec {
   static class CellEncoder extends BaseEncoder {
     CellEncoder(final OutputStream out) {

View File

@@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -32,7 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * Uses ints delimiting all lengths. Profligate. Needs tune up.
  * <b>Use this Codec only at server side.</b>
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class CellCodecWithTags implements Codec {
   static class CellEncoder extends BaseEncoder {
     CellEncoder(final OutputStream out) {
@@ -119,4 +120,4 @@ public class CellCodecWithTags implements Codec {
   public Encoder getEncoder(OutputStream os) {
     return new CellEncoder(os);
   }
 }

View File

@@ -23,6 +23,7 @@ import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 
@@ -43,7 +44,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 * KeyValue2 backing array
 * </pre>
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class KeyValueCodec implements Codec {
   public static class KeyValueEncoder extends BaseEncoder {
     public KeyValueEncoder(final OutputStream out) {
@@ -82,4 +83,4 @@ public class KeyValueCodec implements Codec {
   public Encoder getEncoder(OutputStream os) {
     return new KeyValueEncoder(os);
   }
 }

View File

@@ -23,6 +23,7 @@ import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 
@@ -49,7 +50,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 * Note: The only difference of this with KeyValueCodec is the latter ignores tags in KeyValues.
 * <b>Use this Codec only at server side.</b>
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class KeyValueCodecWithTags implements Codec {
   public static class KeyValueEncoder extends BaseEncoder {
     public KeyValueEncoder(final OutputStream out) {
@@ -88,4 +89,4 @@ public class KeyValueCodecWithTags implements Codec {
   public Encoder getEncoder(OutputStream os) {
     return new KeyValueEncoder(os);
   }
 }
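
The codec classes above are exactly this kind of config-visible name: client and server agree on a cell codec through a class-name key. A hedged sketch, assuming the client-side key "hbase.client.rpc.codec" read by the RPC layer:

    Configuration conf = HBaseConfiguration.create();
    // Select the RPC cell codec by fully qualified class name.
    conf.set("hbase.client.rpc.codec",
        "org.apache.hadoop.hbase.codec.KeyValueCodecWithTags");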

View File

@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -39,7 +40,7 @@ import org.apache.zookeeper.KeeperException;
 * be), since it may take a little time for the ZK notification to propagate, in which case we may
 * accidentally delete some files.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
 
   private static final Log LOG = LogFactory.getLog(LongTermArchivingHFileCleaner.class);
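
Cleaner delegates such as this one are wired in through a comma-separated list of class names, which is why the audience change matters here. A sketch using the HFile cleaner plugin key:

    Configuration conf = HBaseConfiguration.create();
    // Each entry must name a BaseHFileCleanerDelegate implementation.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner");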

View File

@@ -25,13 +25,14 @@ import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 
 /**
  * Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp.
  * Use a different codec if you want that in the stream.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class MessageCodec implements Codec {
   static class MessageEncoder extends BaseEncoder {
     MessageEncoder(final OutputStream out) {

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -29,7 +30,7 @@ import org.apache.zookeeper.KeeperException;
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
   protected Server server;
   protected ZooKeeperWatcher watcher;

View File

@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
 import java.io.IOException;
 import java.util.List;
 
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG})
 @InterfaceStability.Evolving
 public class BaseMasterObserver implements MasterObserver {
   @Override

View File

@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.http.conf.ConfServlet;
 import org.apache.hadoop.hbase.http.jmx.JMXJsonServlet;
 import org.apache.hadoop.hbase.http.log.LogLevel;
@@ -1222,6 +1223,7 @@ public class HttpServer implements FilterContainer {
    * parameter names and values. The goal is to quote the characters to make
    * all of the servlets resistant to cross-site scripting attacks.
    */
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
   public static class QuotingInputFilter implements Filter {
     private FilterConfig config;

View File

@@ -26,6 +26,10 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletResponse;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class NoCacheFilter implements Filter {
 
   @Override

View File

@@ -31,7 +31,9 @@ import javax.servlet.http.HttpServletRequestWrapper;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.http.FilterContainer;
 import org.apache.hadoop.hbase.http.FilterInitializer;
@@ -44,6 +46,7 @@ import static org.apache.hadoop.hbase.http.ServerConfigurationKeys.DEFAULT_HBASE
 * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who)
 * so that the web UI is usable for a secure cluster without authentication.
 */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class StaticUserWebFilter extends FilterInitializer {
   static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
@@ -77,6 +80,7 @@ public class StaticUserWebFilter extends FilterInitializer {
     }
   }
 
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
   public static class StaticUserFilter implements Filter {
     private User user;
     private String username;

View File

@@ -35,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
@@ -233,6 +234,7 @@ public class ClusterStatusPublisher extends Chore {
     void close();
   }
 
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
   public static class MulticastPublisher implements Publisher {
     private DatagramChannel channel;
     private final EventLoopGroup group = new NioEventLoopGroup(
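
Both multicast endpoints are selected by class name; a sketch, hedged because the key names are quoted from memory of this code path rather than from the diff:

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.status.publisher.class",
        "org.apache.hadoop.hbase.master.ClusterStatusPublisher$MulticastPublisher");
    conf.set("hbase.status.listener.class",
        "org.apache.hadoop.hbase.client.ClusterStatusListener$MulticastListener");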

View File

@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerLoad;
@@ -53,7 +54,7 @@ import org.apache.hadoop.hbase.util.Pair;
 * read latencies for the regions even when their primary region servers die.
 *
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
   private static final Log LOG = LogFactory.getLog(FavoredNodeLoadBalancer.class);
@@ -344,4 +345,4 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
       globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion);
     }
   }
 }

View File

@@ -31,6 +31,7 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -54,7 +55,7 @@ import com.google.common.collect.MinMaxPriorityQueue;
 *
 * <p>This classes produces plans for the {@link AssignmentManager} to execute.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SimpleLoadBalancer extends BaseLoadBalancer {
   private static final Log LOG = LogFactory.getLog(SimpleLoadBalancer.class);
   private static final Random RANDOM = new Random(System.currentTimeMillis());

View File

@@ -34,6 +34,7 @@ import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
@@ -91,7 +92,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 * <p>This balancer is best used with hbase.master.loadbalance.bytable set to false
 * so that the balancer gets the full picture of all loads on the cluster.</p>
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class StochasticLoadBalancer extends BaseLoadBalancer {
 
   protected static final String STEPS_PER_REGION_KEY =
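
All of the balancers annotated above are chosen through a single master key; a sketch:

    Configuration conf = HBaseConfiguration.create();
    // The master instantiates whichever LoadBalancer implementation this names.
    conf.set("hbase.master.loadbalancer.class",
        "org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer");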

View File

@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.util.FSUtils;
 
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 *   /hbase/archive/table/region/cf/.links-hfile/ref-region.ref-table
 * To check if the hfile can be deleted the back references folder must be empty.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class HFileLinkCleaner extends BaseHFileCleanerDelegate {
   private static final Log LOG = LogFactory.getLog(HFileLinkCleaner.class);

View File

@@ -22,13 +22,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * HFile cleaner that uses the timestamp of the hfile to determine if it should be deleted. By
  * default they are allowed to live for {@value #DEFAULT_TTL}
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate {
 
   public static final Log LOG = LogFactory.getLog(TimeToLiveHFileCleaner.class.getName());

View File

@@ -22,13 +22,14 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Log cleaner that uses the timestamp of the hlog to determine if it should
  * be deleted. By default they are allowed to live for 10 minutes.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate {
   static final Log LOG = LogFactory.getLog(TimeToLiveLogCleaner.class.getName());
   // Configured time a log can be kept after it was closed

View File

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -36,7 +37,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 * Implementation of a file cleaner that checks if a hfile is still used by snapshots of HBase
 * tables.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 @InterfaceStability.Evolving
 public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
   private static final Log LOG = LogFactory.getLog(SnapshotHFileCleaner.class);

View File

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -36,7 +37,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 * Implementation of a log cleaner that checks if a log is still used by
 * snapshots of HBase tables.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 @InterfaceStability.Evolving
 public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
   private static final Log LOG = LogFactory.getLog(SnapshotLogCleaner.class);

View File

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
@@ -88,7 +89,7 @@ import org.apache.zookeeper.KeeperException;
 * Note: Currently there can only be one snapshot being taken at a time over the cluster. This is a
 * simplification in the current implementation.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 @InterfaceStability.Unstable
 public class SnapshotManager extends MasterProcedureManager implements Stoppable {
   private static final Log LOG = LogFactory.getLog(SnapshotManager.class);

View File

@@ -26,7 +26,9 @@ import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -47,6 +49,7 @@ import org.apache.zookeeper.KeeperException;
 import com.google.common.collect.Lists;
 
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class MasterFlushTableProcedureManager extends MasterProcedureManager {
 
   public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
@@ -194,4 +197,4 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
     return proc.isCompleted();
   }
 }

View File

@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
@@ -52,7 +53,7 @@ import org.apache.zookeeper.KeeperException;
 /**
  * This manager class handles flushing of the regions for table on a {@link HRegionServer}.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class RegionServerFlushTableProcedureManager extends RegionServerProcedureManager {
   private static final Log LOG = LogFactory.getLog(RegionServerFlushTableProcedureManager.class);

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 
@@ -31,7 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 * changed to {@link IncreasingToUpperBoundRegionSplitPolicy}
 * </p>
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
   private long desiredMaxFileSize;

View File

@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 * Default StoreEngine creates the default compactor, policy, and store file manager, or
 * their derivatives.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class DefaultStoreEngine extends StoreEngine<
     DefaultStoreFlusher, RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {

View File

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 
 /**
  * A {@link RegionSplitPolicy} that disables region splits.
@@ -26,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 * Most of the time, using {@link ConstantSizeRegionSplitPolicy} with a
 * large region size (10GB, etc) is safer.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class DisabledRegionSplitPolicy extends RegionSplitPolicy {
   @Override
   protected boolean shouldSplit() {

View File

@@ -23,6 +23,7 @@ import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -35,7 +36,7 @@ import com.google.common.base.Preconditions;
 * 0.94.0
 * @see ConstantSizeRegionSplitPolicy Default split policy before 0.94.0
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public abstract class RegionSplitPolicy extends Configured {
   private static final Class<? extends RegionSplitPolicy>
     DEFAULT_SPLIT_POLICY_CLASS = IncreasingToUpperBoundRegionSplitPolicy.class;
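
Split policies are likewise loaded reflectively, cluster-wide or per table. A sketch using the cluster-wide key:

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy");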

View File

@@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -37,7 +38,7 @@ import com.google.common.base.Preconditions;
 /**
  * The storage engine that implements the stripe-based store/compaction scheme.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class StripeStoreEngine extends StoreEngine<StripeStoreFlusher,
     StripeCompactionPolicy, StripeCompactor, StripeStoreFileManager> {
   static final Log LOG = LogFactory.getLog(StripeStoreEngine.class);
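
Store engines follow the same pattern; switching a store to stripe compactions is purely a class-name change, assuming the key "hbase.hstore.engine.class" read by StoreEngine:

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.hstore.engine.class",
        "org.apache.hadoop.hbase.regionserver.StripeStoreEngine");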

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
@@ -71,7 +72,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 * <p>
 * On shutdown, requires {@link #stop(boolean)} to be called
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 @InterfaceStability.Unstable
 public class RegionServerSnapshotManager extends RegionServerProcedureManager {
   private static final Log LOG = LogFactory.getLog(RegionServerSnapshotManager.class);

View File

@@ -55,7 +55,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 * which is appended at the end of the WAL. This is empty for now; it can contain some meta
 * information such as Region level stats, etc in future.
 */
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
 public class ProtobufLogReader extends ReaderBase {
   private static final Log LOG = LogFactory.getLog(ProtobufLogReader.class);
   static final byte[] PB_WAL_MAGIC = Bytes.toBytes("PWAL");

View File

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 /**
  * Writer for protobuf-based WAL.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ProtobufLogWriter extends WriterBase {
   private final Log LOG = LogFactory.getLog(this.getClass());
   protected FSDataOutputStream output;

View File

@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.crypto.Cipher;
 import org.apache.hadoop.hbase.io.crypto.Decryptor;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WALHdrResult;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
 
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SecureProtobufLogReader extends ProtobufLogReader {
 
   private static final Log LOG = LogFactory.getLog(SecureProtobufLogReader.class);

View File

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.crypto.Cipher;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
 
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SecureProtobufLogWriter extends ProtobufLogWriter {
 
   private static final Log LOG = LogFactory.getLog(SecureProtobufLogWriter.class);
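
Enabling WAL encryption is done by swapping in the secure reader and writer classes by name, which is precisely the contract the CONFIG audience documents:

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.regionserver.hlog.reader.impl",
        "org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader");
    conf.set("hbase.regionserver.hlog.writer.impl",
        "org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter");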

View File

@@ -38,7 +38,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.Metadata;
 import org.apache.hadoop.io.Text;
 
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
 public class SequenceFileLogReader extends ReaderBase {
   private static final Log LOG = LogFactory.getLog(SequenceFileLogReader.class);

View File

@@ -47,7 +47,7 @@ import com.google.protobuf.ByteString;
 * This codec is used at server side for writing cells to WAL as well as for sending edits
 * as part of the distributed splitting process.
 */
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
 public class WALCellCodec implements Codec {
   /** Configuration key for the class to use when encoding cells in the WAL */
   public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";

View File

@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -43,7 +44,7 @@ import com.google.common.collect.Sets;
 * Implementation of a log cleaner that checks if a log is still scheduled for
 * replication before deleting it when its TTL is over.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class ReplicationLogCleaner extends BaseLogCleanerDelegate implements Abortable {
   private static final Log LOG = LogFactory.getLog(ReplicationLogCleaner.class);
   private ZooKeeperWatcher zkw;

View File

@@ -35,8 +35,9 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class GzipFilter implements Filter {
   private Set<String> mimeTypes = new HashSet<String>();

View File

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -144,7 +145,7 @@ import com.google.protobuf.Service;
 * commands.
 * </p>
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class AccessController extends BaseMasterAndRegionObserver
     implements RegionServerObserver,
       AccessControlService.Interface, CoprocessorService, EndpointObserver {
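
Security coprocessors are installed by listing their class names in the coprocessor keys, so AccessController's name is user-facing as well:

    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);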

View File

@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -110,7 +111,7 @@ import com.google.protobuf.Service;
 * Coprocessor that has both the MasterObserver and RegionObserver implemented that supports in
 * visibility labels
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class VisibilityController extends BaseMasterAndRegionObserver implements
     VisibilityLabelsService.Interface, CoprocessorService {

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.SequenceFile;
@@ -44,7 +45,7 @@ import org.apache.hadoop.io.compress.DefaultCodec;
 * Implementation of {@link HLog.Writer} that delegates to
 * SequenceFile.Writer. Legacy implementation only used for compat tests.
 */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SequenceFileLogWriter extends WriterBase {
   private final Log LOG = LogFactory.getLog(this.getClass());
   // The sequence file we delegate to.
// The sequence file we delegate to. // The sequence file we delegate to.