diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
index 3f8a6a58965..c187fc42881 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
@@ -176,6 +177,7 @@ class ClusterStatusListener implements Closeable {
/**
* An implementation using a multicast message between the master & the client.
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
class MulticastListener implements Listener {
private DatagramChannel channel;
private final EventLoopGroup group = new NioEventLoopGroup(
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
index 4010dc0709d..cf2116dccd9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
@@ -29,4 +29,8 @@ public class HBaseInterfaceAudience {
public static final String COPROC = "Coprocesssor";
public static final String REPLICATION = "Replication";
public static final String PHOENIX = "Phoenix";
+ /**
+ * Denotes class names that appear in user facing configuration files.
+ */
+ public static final String CONFIG = "Configuration";
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index b09da45336d..9eda546fb2c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -32,7 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* delimiting all lengths. Profligate. Needs tune up.
* Note: This will not write tags of a Cell.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class CellCodec implements Codec {
static class CellEncoder extends BaseEncoder {
CellEncoder(final OutputStream out) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 809a65f87d5..08ec1cbf35d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -25,6 +25,7 @@ import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -32,7 +33,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* Uses ints delimiting all lengths. Profligate. Needs tune up.
* Use this Codec only at server side.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class CellCodecWithTags implements Codec {
static class CellEncoder extends BaseEncoder {
CellEncoder(final OutputStream out) {
@@ -119,4 +120,4 @@ public class CellCodecWithTags implements Codec {
public Encoder getEncoder(OutputStream os) {
return new CellEncoder(os);
}
-}
\ No newline at end of file
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
index 2e67297f9a6..302d61edfa4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
@@ -23,6 +23,7 @@ import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -43,7 +44,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
* KeyValue2 backing array
*
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class KeyValueCodec implements Codec {
public static class KeyValueEncoder extends BaseEncoder {
public KeyValueEncoder(final OutputStream out) {
@@ -82,4 +83,4 @@ public class KeyValueCodec implements Codec {
public Encoder getEncoder(OutputStream os) {
return new KeyValueEncoder(os);
}
-}
\ No newline at end of file
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
index 5b532e9a207..6bdfc2c8826 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodecWithTags.java
@@ -23,6 +23,7 @@ import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
@@ -49,7 +50,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
* Note: The only difference of this with KeyValueCodec is the latter ignores tags in KeyValues.
* Use this Codec only at server side.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class KeyValueCodecWithTags implements Codec {
public static class KeyValueEncoder extends BaseEncoder {
public KeyValueEncoder(final OutputStream out) {
@@ -88,4 +89,4 @@ public class KeyValueCodecWithTags implements Codec {
public Encoder getEncoder(OutputStream os) {
return new KeyValueEncoder(os);
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
index ec5a48f4ae5..e56c877f2f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -39,7 +40,7 @@ import org.apache.zookeeper.KeeperException;
* be), since it may take a little time for the ZK notification to propagate, in which case we may
* accidentally delete some files.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
private static final Log LOG = LogFactory.getLog(LongTermArchivingHFileCleaner.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java
index 00b713c8409..8370fcca8ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java
@@ -25,13 +25,14 @@ import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
/**
* Codec that just writes out Cell as a protobuf Cell Message. Does not write the mvcc stamp.
* Use a different codec if you want that in the stream.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class MessageCodec implements Codec {
static class MessageEncoder extends BaseEncoder {
MessageEncoder(final OutputStream out) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 89a8b198c40..91db2c2edb4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -21,6 +21,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableStateManager;
import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
@@ -30,7 +31,7 @@ import org.apache.zookeeper.KeeperException;
/**
* ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
private static final Log LOG = LogFactory.getLog(ZkCoordinatedStateManager.class);
protected Server server;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 21021ad0b13..da190f8ace1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription
import java.io.IOException;
import java.util.List;
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG})
@InterfaceStability.Evolving
public class BaseMasterObserver implements MasterObserver {
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 857d0adb53d..99e04c71cc9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.http.conf.ConfServlet;
import org.apache.hadoop.hbase.http.jmx.JMXJsonServlet;
import org.apache.hadoop.hbase.http.log.LogLevel;
@@ -1222,6 +1223,7 @@ public class HttpServer implements FilterContainer {
* parameter names and values. The goal is to quote the characters to make
* all of the servlets resistant to cross-site scripting attacks.
*/
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public static class QuotingInputFilter implements Filter {
private FilterConfig config;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
index fffbd8e0845..889f9e94a59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java
@@ -26,6 +26,10 @@ import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class NoCacheFilter implements Filter {
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
index 9e2f1574a41..4b599c4b879 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java
@@ -31,7 +31,9 @@ import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.http.FilterContainer;
import org.apache.hadoop.hbase.http.FilterInitializer;
@@ -44,6 +46,7 @@ import static org.apache.hadoop.hbase.http.ServerConfigurationKeys.DEFAULT_HBASE
* Provides a servlet filter that pretends to authenticate a fake user (Dr.Who)
* so that the web UI is usable for a secure cluster without authentication.
*/
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class StaticUserWebFilter extends FilterInitializer {
static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
@@ -77,6 +80,7 @@ public class StaticUserWebFilter extends FilterInitializer {
}
}
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public static class StaticUserFilter implements Filter {
private User user;
private String username;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
index 85ec8cfc5c6..ce279f52d8a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
@@ -233,6 +234,7 @@ public class ClusterStatusPublisher extends Chore {
void close();
}
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public static class MulticastPublisher implements Publisher {
private DatagramChannel channel;
private final EventLoopGroup group = new NioEventLoopGroup(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index d1a785efde4..cc80c726904 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
@@ -53,7 +54,7 @@ import org.apache.hadoop.hbase.util.Pair;
* read latencies for the regions even when their primary region servers die.
*
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
private static final Log LOG = LogFactory.getLog(FavoredNodeLoadBalancer.class);
@@ -344,4 +345,4 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion);
}
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index db69d6eda69..96c56038290 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -31,6 +31,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -54,7 +55,7 @@ import com.google.common.collect.MinMaxPriorityQueue;
*
* This classes produces plans for the {@link AssignmentManager} to execute.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class SimpleLoadBalancer extends BaseLoadBalancer {
private static final Log LOG = LogFactory.getLog(SimpleLoadBalancer.class);
private static final Random RANDOM = new Random(System.currentTimeMillis());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 8f6314f12e5..52db329b73d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -34,6 +34,7 @@ import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
@@ -91,7 +92,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
* This balancer is best used with hbase.master.loadbalance.bytable set to false
* so that the balancer gets the full picture of all loads on the cluster.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class StochasticLoadBalancer extends BaseLoadBalancer {
protected static final String STEPS_PER_REGION_KEY =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
index 48c427e3c78..1b8dd302dd6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
* /hbase/archive/table/region/cf/.links-hfile/ref-region.ref-table
* To check if the hfile can be deleted the back references folder must be empty.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class HFileLinkCleaner extends BaseHFileCleanerDelegate {
private static final Log LOG = LogFactory.getLog(HFileLinkCleaner.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
index 46fe971da18..470b65b389d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
@@ -22,13 +22,14 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* HFile cleaner that uses the timestamp of the hfile to determine if it should be deleted. By
* default they are allowed to live for {@value #DEFAULT_TTL}
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate {
public static final Log LOG = LogFactory.getLog(TimeToLiveHFileCleaner.class.getName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
index f02195404bb..15d96c3aa9c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
@@ -22,13 +22,14 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Log cleaner that uses the timestamp of the hlog to determine if it should
* be deleted. By default they are allowed to live for 10 minutes.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate {
static final Log LOG = LogFactory.getLog(TimeToLiveLogCleaner.class.getName());
// Configured time a log can be kept after it was closed
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
index 562f68215cc..dd070e4c958 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -36,7 +37,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
* Implementation of a file cleaner that checks if a hfile is still used by snapshots of HBase
* tables.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@InterfaceStability.Evolving
public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
private static final Log LOG = LogFactory.getLog(SnapshotHFileCleaner.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
index cfe5e0d5f8b..28cfe7c5a1f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -36,7 +37,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
* Implementation of a log cleaner that checks if a log is still used by
* snapshots of HBase tables.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@InterfaceStability.Evolving
public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
private static final Log LOG = LogFactory.getLog(SnapshotLogCleaner.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 31ec0982af8..3fd55dc14fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
@@ -88,7 +89,7 @@ import org.apache.zookeeper.KeeperException;
* Note: Currently there can only be one snapshot being taken at a time over the cluster. This is a
* simplification in the current implementation.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@InterfaceStability.Unstable
public class SnapshotManager extends MasterProcedureManager implements Stoppable {
private static final Log LOG = LogFactory.getLog(SnapshotManager.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index 4101c16ff40..e92b1e0420d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -26,7 +26,9 @@ import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -47,6 +49,7 @@ import org.apache.zookeeper.KeeperException;
import com.google.common.collect.Lists;
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class MasterFlushTableProcedureManager extends MasterProcedureManager {
public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
@@ -194,4 +197,4 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
return proc.isCompleted();
}
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
index 36db8313012..61d0007a57a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DaemonThreadFactory;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
@@ -52,7 +53,7 @@ import org.apache.zookeeper.KeeperException;
/**
* This manager class handles flushing of the regions for table on a {@link HRegionServer}.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class RegionServerFlushTableProcedureManager extends RegionServerProcedureManager {
private static final Log LOG = LogFactory.getLog(RegionServerFlushTableProcedureManager.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
index 24c456260db..bb7291b0bf2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -31,7 +32,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
* changed to {@link IncreasingToUpperBoundRegionSplitPolicy}
*
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
private long desiredMaxFileSize;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
index f2a0b062e2c..f20eece8b98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
* Default StoreEngine creates the default compactor, policy, and store file manager, or
* their derivatives.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class DefaultStoreEngine extends StoreEngine<
DefaultStoreFlusher, RatioBasedCompactionPolicy, DefaultCompactor, DefaultStoreFileManager> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
index 6db07e0b8f4..c911a0a8e67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DisabledRegionSplitPolicy.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
/**
* A {@link RegionSplitPolicy} that disables region splits.
@@ -26,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
* Most of the time, using {@link ConstantSizeRegionSplitPolicy} with a
* large region size (10GB, etc) is safer.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class DisabledRegionSplitPolicy extends RegionSplitPolicy {
@Override
protected boolean shouldSplit() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index f2d197aa78c..802e3d7939b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -23,6 +23,7 @@ import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.util.ReflectionUtils;
@@ -35,7 +36,7 @@ import com.google.common.base.Preconditions;
* 0.94.0
* @see ConstantSizeRegionSplitPolicy Default split policy before 0.94.0
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public abstract class RegionSplitPolicy extends Configured {
private static final Class<? extends RegionSplitPolicy>
DEFAULT_SPLIT_POLICY_CLASS = IncreasingToUpperBoundRegionSplitPolicy.class;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
index fe54a93f937..42b1e8c95e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreEngine.java
@@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -37,7 +38,7 @@ import com.google.common.base.Preconditions;
/**
* The storage engine that implements the stripe-based store/compaction scheme.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class StripeStoreEngine extends StoreEngine
* On shutdown, requires {@link #stop(boolean)} to be called
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@InterfaceStability.Unstable
public class RegionServerSnapshotManager extends RegionServerProcedureManager {
private static final Log LOG = LogFactory.getLog(RegionServerSnapshotManager.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 3f2e6d68dc2..51e61100264 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -55,7 +55,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
* which is appended at the end of the WAL. This is empty for now; it can contain some meta
* information such as Region level stats, etc in future.
*/
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
public class ProtobufLogReader extends ReaderBase {
private static final Log LOG = LogFactory.getLog(ProtobufLogReader.class);
static final byte[] PB_WAL_MAGIC = Bytes.toBytes("PWAL");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java
index 9af87795226..8e3ee5d7790 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
/**
* Writer for protobuf-based WAL.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class ProtobufLogWriter extends WriterBase {
private final Log LOG = LogFactory.getLog(this.getClass());
protected FSDataOutputStream output;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java
index 25ee6916a12..021965495cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Decryptor;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WALHdrResult;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class SecureProtobufLogReader extends ProtobufLogReader {
private static final Log LOG = LogFactory.getLog(SecureProtobufLogReader.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java
index 5b17ab6eec6..1f0262d58df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class SecureProtobufLogWriter extends ProtobufLogWriter {
private static final Log LOG = LogFactory.getLog(SecureProtobufLogWriter.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
index 8b8a19b9b6d..37cc0918ccf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.Text;
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
public class SequenceFileLogReader extends ReaderBase {
private static final Log LOG = LogFactory.getLog(SequenceFileLogReader.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 93c33ad35e0..00384236ec1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -47,7 +47,7 @@ import com.google.protobuf.ByteString;
* This codec is used at server side for writing cells to WAL as well as for sending edits
* as part of the distributed splitting process.
*/
-@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
+@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
public class WALCellCodec implements Codec {
/** Configuration key for the class to use when encoding cells in the WAL */
public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
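For context on why this codec moves to the CONFIG audience: the class is picked up by name through the hbase.regionserver.wal.codec key defined just above, so the class name itself is part of the user-facing configuration surface. A minimal sketch of that wiring, assuming a hypothetical org.example.MyWALCellCodec subclass of WALCellCodec:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class WalCodecConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Name a codec implementation under the key shown above; the class named here
      // is hypothetical and would have to be on the region server classpath.
      conf.set("hbase.regionserver.wal.codec", "org.example.MyWALCellCodec");
      System.out.println(conf.get("hbase.regionserver.wal.codec"));
    }
  }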
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index be69d98d20e..c4209801107 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -43,7 +44,7 @@ import com.google.common.collect.Sets;
* Implementation of a log cleaner that checks if a log is still scheduled for
* replication before deleting it when its TTL is over.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class ReplicationLogCleaner extends BaseLogCleanerDelegate implements Abortable {
private static final Log LOG = LogFactory.getLog(ReplicationLogCleaner.class);
private ZooKeeperWatcher zkw;
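For context on the cleaner delegates in this patch (ReplicationLogCleaner here, plus the TTL and snapshot cleaners earlier): the master instantiates them from comma-separated class lists in site configuration, which is why their class names become user facing. A minimal sketch, assuming the hbase.master.logcleaner.plugins key read by the master's log-cleaning chore:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class LogCleanerPluginsSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Delegates are consulted in order; both class names below appear in this patch.
      conf.set("hbase.master.logcleaner.plugins",
          "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
              + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
      System.out.println(conf.get("hbase.master.logcleaner.plugins"));
    }
  }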
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
index ea2f098b95a..f77cade6bf4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
@@ -35,8 +35,9 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class GzipFilter implements Filter {
private Set