diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index 6005871a4d3..eb03a21fa99 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.classification.tools;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
@@ -29,6 +30,7 @@ import com.sun.tools.doclets.standard.Standard;
* {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}.
* It delegates to the Standard Doclet, and takes the same options.
*/
+@InterfaceAudience.Private
public class ExcludePrivateAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
index c283c916162..def4f1a594f 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/IncludePublicAnnotationsStandardDoclet.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.classification.tools;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
@@ -33,6 +35,7 @@ import com.sun.tools.doclets.standard.Standard;
* are also excluded.
* It delegates to the Standard Doclet, and takes the same options.
*/
+@InterfaceAudience.Private
public class IncludePublicAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java
index 6cd52e87f01..d2d92b3a199 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* Ways to keep cells marked for delete around.
*/
@@ -25,6 +28,8 @@ package org.apache.hadoop.hbase;
* Don't change the TRUE/FALSE labels below, these have to be called
* this way for backwards compatibility.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public enum KeepDeletedCells {
/** Deleted Cells are not retained. */
FALSE,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 3806115757f..3f55b0e9a9d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
@@ -90,6 +91,7 @@ import com.google.common.annotations.VisibleForTesting;
* gets as well.
*
*/
+@InterfaceAudience.Private
class AsyncProcess {
protected static final Log LOG = LogFactory.getLog(AsyncProcess.class);
protected static final AtomicLong COUNTER = new AtomicLong();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
index 44f1ecace84..984a8678e81 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelegatingRetryingCallable.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
public class DelegatingRetryingCallable<T, D extends RetryingCallable<T>> implements
RetryingCallable<T> {
protected final D delegate;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
index 9d685b8566d..16707cbe706 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.client;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Keeps track of repeated failures to any region server. Multiple threads manipulate the contents
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
index 34f90d521af..911e034ef0b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
@@ -21,6 +21,9 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* Used to communicate with a single HBase table.
* Obtain an instance from an {@link HConnection}.
@@ -29,6 +32,8 @@ import java.util.List;
* @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead
*/
@Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Stable
public interface HTableInterface extends Table {
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 8d0fbc83bf0..7d61a0b4696 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -57,20 +57,20 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* Each put will be sharded into different buffer queues based on its destination region server.
* So each region server buffer queue will only have the puts which share the same destination.
* And each queue will have a flush worker thread to flush the puts request to the region server.
- * If any queue is full, the HTableMultiplexer starts to drop the Put requests for that
+ * If any queue is full, the HTableMultiplexer starts to drop the Put requests for that
* particular queue.
- *
+ *
* Also all the puts will be retried as a configuration number before dropping.
* And the HTableMultiplexer can report the number of buffered requests and the number of the
* failed (dropped) requests in total or on per region server basis.
- *
+ *
* This class is thread safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableMultiplexer {
private static final Log LOG = LogFactory.getLog(HTableMultiplexer.class.getName());
-
+
public static final String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS =
"hbase.tablemultiplexer.flush.period.ms";
public static final String TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads";
@@ -89,7 +89,7 @@ public class HTableMultiplexer {
private final int maxKeyValueSize;
private final ScheduledExecutorService executor;
private final long flushPeriod;
-
+
/**
* @param conf The HBaseConfiguration
* @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
@@ -128,7 +128,7 @@ public class HTableMultiplexer {
}
/**
- * The puts request will be buffered by their corresponding buffer queue.
+ * The puts request will be buffered by their corresponding buffer queue.
* Return the list of puts which could not be queued.
* @param tableName
* @param puts
@@ -138,13 +138,13 @@ public class HTableMultiplexer {
public List<Put> put(TableName tableName, final List<Put> puts) {
if (puts == null)
return null;
-
+
List<Put> failedPuts = null;
boolean result;
for (Put put : puts) {
result = put(tableName, put, this.retryNum);
if (result == false) {
-
+
// Create the failed puts list if necessary
if (failedPuts == null) {
failedPuts = new ArrayList<Put>();
@@ -163,7 +163,7 @@ public class HTableMultiplexer {
public List<Put> put(byte[] tableName, final List<Put> puts) {
return put(TableName.valueOf(tableName), puts);
}
-
+
/**
* The put request will be buffered by its corresponding buffer queue. And the put request will be
* retried before dropping the request.
@@ -185,7 +185,7 @@ public class HTableMultiplexer {
// Generate a MultiPutStatus object and offer it into the queue
PutStatus s = new PutStatus(loc.getRegionInfo(), put, retry);
-
+
return queue.offer(s);
}
} catch (IOException e) {
@@ -209,7 +209,7 @@ public class HTableMultiplexer {
public boolean put(final byte[] tableName, Put put) {
return put(TableName.valueOf(tableName), put);
}
-
+
/**
* @return the current HTableMultiplexerStatus
*/
@@ -239,6 +239,8 @@ public class HTableMultiplexer {
* report the number of buffered requests and the number of the failed (dropped) requests
* in total or on per region server basis.
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
public static class HTableMultiplexerStatus {
private long totalFailedPutCounter;
private long totalBufferedPutCounter;
@@ -339,7 +341,7 @@ public class HTableMultiplexer {
return this.serverToAverageLatencyMap;
}
}
-
+
private static class PutStatus {
public final HRegionInfo regionInfo;
public final Put put;
@@ -406,7 +408,7 @@ public class HTableMultiplexer {
private final ScheduledExecutorService executor;
private final int maxRetryInQueue;
private final AtomicInteger retryInQueue = new AtomicInteger(0);
-
+
public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr,
HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize,
ExecutorService pool, ScheduledExecutorService executor) {
@@ -443,7 +445,7 @@ public class HTableMultiplexer {
private boolean resubmitFailedPut(PutStatus ps, HRegionLocation oldLoc) throws IOException {
// Decrease the retry count
final int retryCount = ps.retryCount - 1;
-
+
if (retryCount <= 0) {
// Update the failed counter and no retry any more.
return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index a1353844d53..66dcdce1d72 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD",
justification="stub used by ipc")
+@InterfaceAudience.Private
public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<T> {
protected final ClusterConnection connection;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
index 27067181874..3c4b39fbfba 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
@@ -41,6 +41,7 @@ public class RetriesExhaustedException extends IOException {
/**
* Datastructure that allows adding more info around Throwable incident.
*/
+ @InterfaceAudience.Private
public static class ThrowableWithExtraContext {
private final Throwable t;
private final long when;
@@ -52,7 +53,7 @@ public class RetriesExhaustedException extends IOException {
this.when = when;
this.extras = extras;
}
-
+
@Override
public String toString() {
return new Date(this.when).toString() + ", " + extras + ", " + t.toString();
@@ -76,6 +77,7 @@ public class RetriesExhaustedException extends IOException {
* @param numTries
* @param exceptions List of exceptions that failed before giving up
*/
+ @InterfaceAudience.Private
public RetriesExhaustedException(final int numTries,
final List<ThrowableWithExtraContext> exceptions) {
super(getMessage(numTries, exceptions),
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java
index 53ed7c9b47c..6949a578d77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.RegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Gets or Scans throw this exception if running without in-row scan flag
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
* hbase.table.max.rowsize).
*/
@InterfaceAudience.Public
+@InterfaceStability.Stable
public class RowTooBigException extends RegionException {
public RowTooBigException(String message) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
index f594a8cd854..9f059977c2b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerFactory.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.client;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -24,6 +25,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
/**
* Factory to create an {@link RpcRetryingCaller}
*/
+@InterfaceAudience.Private
public class RpcRetryingCallerFactory {
/** Configuration key for a custom {@link RpcRetryingCaller} */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 85704ffb5d7..57accced8d1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import com.google.protobuf.ServiceException;
+
import org.htrace.Trace;
/**
@@ -59,6 +61,7 @@ import org.htrace.Trace;
* the first answer. If the answer comes from one of the secondary replica, it will
* be marked as stale.
*/
+@InterfaceAudience.Private
public class RpcRetryingCallerWithReadReplicas {
static final Log LOG = LogFactory.getLog(RpcRetryingCallerWithReadReplicas.class);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
index e14bda669cd..f8a0e1c89e8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/Batch.java
@@ -47,6 +47,8 @@ public abstract class Batch {
* {@link Batch.Call#call(Object)}
* @param <R> the return type from {@link Batch.Call#call(Object)}
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Stable
public interface Call<T, R> {
R call(T instance) throws IOException;
}
@@ -65,6 +67,8 @@ public abstract class Batch {
* @param <R> the return type from the associated {@link Batch.Call#call(Object)}
* @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Stable
public interface Callback<R> {
void update(byte[] region, byte[] row, R result);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
index cb8e5df4c9a..49134f1f092 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java
@@ -38,8 +38,8 @@ package org.apache.hadoop.hbase.exceptions;
import java.io.IOException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Thrown when the client believes that we are trying to communicate to has
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index 570eda2e6ec..5bfd2f3e85a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -17,9 +17,14 @@
*/
package org.apache.hadoop.hbase.exceptions;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* Exception thrown if a mutation fails sanity checks.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRetryIOException {
private static final long serialVersionUID = 1788783640409186240L;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
index c30955bf358..b6b3c32e57f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LockTimeoutException.java
@@ -19,8 +19,15 @@
*/
package org.apache.hadoop.hbase.exceptions;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+/**
+ * Thrown when there is a timeout when trying to acquire a lock
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
public class LockTimeoutException extends DoNotRetryIOException {
private static final long serialVersionUID = -1770764924258999825L;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
index 2d66d54852b..51c960df369 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.exceptions;
import java.net.ConnectException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.ServerName;
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java
index 90ec7cf55ae..933e8883c4b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/UnknownProtocolException.java
@@ -19,10 +19,15 @@
package org.apache.hadoop.hbase.exceptions;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* An error requesting an RPC protocol that the server is not serving.
*/
@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class UnknownProtocolException extends org.apache.hadoop.hbase.DoNotRetryIOException {
private Class<?> protocol;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
index 38854d4755f..91eef6a98b0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/LongComparator.java
@@ -19,6 +19,9 @@
package org.apache.hadoop.hbase.filter;
import com.google.protobuf.InvalidProtocolBufferException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
import org.apache.hadoop.hbase.util.Bytes;
@@ -26,7 +29,8 @@ import org.apache.hadoop.hbase.util.Bytes;
/**
* A long comparator which numerical compares against the specified byte array
*/
-
+@InterfaceAudience.Public
+@InterfaceStability.Stable
public class LongComparator extends ByteArrayComparable {
private Long longValue;
@@ -44,6 +48,7 @@ public class LongComparator extends ByteArrayComparable {
/**
* @return The comparator serialized using pb
*/
+ @Override
public byte [] toByteArray() {
ComparatorProtos.LongComparator.Builder builder =
ComparatorProtos.LongComparator.newBuilder();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
index 127aa4c3e5e..70dd1f9b09b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RegexStringComparator.java
@@ -79,6 +79,8 @@ public class RegexStringComparator extends ByteArrayComparable {
private Engine engine;
/** Engine implementation type (default=JAVA) */
+ @InterfaceAudience.Public
+ @InterfaceStability.Stable
public enum EngineType {
JAVA,
JONI
@@ -153,6 +155,7 @@ public class RegexStringComparator extends ByteArrayComparable {
/**
* @return The comparator serialized using pb
*/
+ @Override
public byte [] toByteArray() {
return engine.toByteArray();
}
@@ -175,7 +178,7 @@ public class RegexStringComparator extends ByteArrayComparable {
if (proto.hasEngine()) {
EngineType engine = EngineType.valueOf(proto.getEngine());
comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
- engine);
+ engine);
} else {
comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
}
@@ -195,6 +198,7 @@ public class RegexStringComparator extends ByteArrayComparable {
* @return true if and only if the fields of the comparator that are serialized
* are equal to the corresponding fields in other. Used for testing.
*/
+ @Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
if (other == this) return true;
if (!(other instanceof RegexStringComparator)) return false;
@@ -212,7 +216,7 @@ public class RegexStringComparator extends ByteArrayComparable {
/**
* This is an internal interface for abstracting access to different regular
- * expression matching engines.
+ * expression matching engines.
*/
static interface Engine {
/**
@@ -220,7 +224,7 @@ public class RegexStringComparator extends ByteArrayComparable {
* for matching
*/
String getPattern();
-
+
/**
* Returns the set of configured match flags, a bit mask that may include
* {@link Pattern} flags
@@ -412,7 +416,7 @@ public class RegexStringComparator extends ByteArrayComparable {
encoding = e.getEncoding();
} else {
throw new IllegalCharsetNameException(name);
- }
+ }
}
}
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java
index a91ecb529ec..ad4224b7c58 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/DelegatingPayloadCarryingRpcController.java
@@ -19,11 +19,13 @@ package org.apache.hadoop.hbase.ipc;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Simple delegating controller for use with the {@link RpcControllerFactory} to help override
* standard behavior of a {@link PayloadCarryingRpcController}.
*/
+@InterfaceAudience.Private
public class DelegatingPayloadCarryingRpcController extends PayloadCarryingRpcController {
private PayloadCarryingRpcController delegate;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
index 6d00adcd592..67e2524d81f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
@@ -49,6 +50,7 @@ import com.google.protobuf.Message;
/**
* Utility to help ipc'ing.
*/
+@InterfaceAudience.Private
class IPCUtil {
public static final Log LOG = LogFactory.getLog(IPCUtil.class);
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java
index 8f1780ca53b..f8ab23f12d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcControllerFactory.java
@@ -22,11 +22,13 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.ReflectionUtils;
/**
* Factory to create a {@link PayloadCarryingRpcController}
*/
+@InterfaceAudience.Private
public class RpcControllerFactory {
public static final String CUSTOM_CONTROLLER_CONF_KEY = "hbase.rpc.controllerfactory.class";
@@ -39,7 +41,7 @@ public class RpcControllerFactory {
public PayloadCarryingRpcController newController() {
return new PayloadCarryingRpcController();
}
-
+
public PayloadCarryingRpcController newController(final CellScanner cellScanner) {
return new PayloadCarryingRpcController(cellScanner);
}
@@ -47,7 +49,7 @@ public class RpcControllerFactory {
public PayloadCarryingRpcController newController(final List<CellScannable> cellIterables) {
return new PayloadCarryingRpcController(cellIterables);
}
-
+
public static RpcControllerFactory instantiate(Configuration configuration) {
String rpcControllerFactoryClazz =
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index 0a4354adf08..5a64c65d263 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc;
import java.io.IOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.Descriptors;
@@ -51,6 +52,7 @@ import com.google.protobuf.Service;
*
*
*/
+@InterfaceAudience.Private
public class ServerRpcController implements RpcController {
/**
* The exception thrown within
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java
index a56f904c806..2ab2a5b924b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/TimeLimitedRpcController.java
@@ -19,11 +19,15 @@
package org.apache.hadoop.hbase.ipc;
+
import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
+@InterfaceAudience.Private
public class TimeLimitedRpcController implements RpcController {
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 4bbc77ba708..9c451b1b1b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -37,6 +37,7 @@ import java.util.Map.Entry;
import java.util.NavigableSet;
import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
@@ -159,6 +160,7 @@ import com.google.protobuf.TextFormat;
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED",
justification="None. Address sometime.")
+@InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class
public final class ProtobufUtil {
private ProtobufUtil() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
index 2675e3aff3a..54a1545f0ff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/InvalidQuotaSettingsException.java
@@ -19,10 +19,12 @@
package org.apache.hadoop.hbase.quotas;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Generic quota exceeded exception for invalid settings
*/
+@InterfaceAudience.Private
public class InvalidQuotaSettingsException extends DoNotRetryIOException {
public InvalidQuotaSettingsException(String msg) {
super(msg);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java
index d9bea8c9af7..e0386b57129 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaExceededException.java
@@ -19,10 +19,14 @@
package org.apache.hadoop.hbase.quotas;
import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Generic quota exceeded exception
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class QuotaExceededException extends DoNotRetryIOException {
public QuotaExceededException(String msg) {
super(msg);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
index bcad9431f75..dad1eddd127 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
@@ -23,6 +23,8 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Describe the throttling result.
@@ -31,11 +33,15 @@ import org.apache.commons.logging.LogFactory;
* operation to go on the server if the waitInterval is grater than the one got
* as result of this exception.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class ThrottlingException extends QuotaExceededException {
private static final long serialVersionUID = 1406576492085155743L;
private static final Log LOG = LogFactory.getLog(ThrottlingException.class);
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
public enum Type {
NumRequestsExceeded,
NumReadRequestsExceeded,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java
index 66781f16707..937e9434e80 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationException.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.replication;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.HBaseException;
/**
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.exceptions.HBaseException;
* store, loss of connection to a peer cluster or errors during deserialization of replication data.
*/
@InterfaceAudience.Public
+@InterfaceStability.Stable
public class ReplicationException extends HBaseException {
private static final long serialVersionUID = -8885598603988198062L;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 35fa602b01d..f115a39fb55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.replication;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
* A factory class for instantiating replication objects that deal with replication state.
*/
+@InterfaceAudience.Private
public class ReplicationFactory {
public static ReplicationQueues getReplicationQueues(final ZooKeeperWatcher zk,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 91e0b05c929..8f01a760c77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -75,6 +76,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
*
* /hbase/replication/peers/1/tableCFs [Value: "table1; table2:cf1,cf3; table3:cfx,cfy"]
*/
+@InterfaceAudience.Private
public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements ReplicationPeers {
// Map of peer clusters keyed by their id
@@ -110,16 +112,16 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
throw new IllegalArgumentException("Cannot add a peer with id=" + id
+ " because that id already exists.");
}
-
+
if(id.contains("-")){
throw new IllegalArgumentException("Found invalid peer name:" + id);
}
-
+
ZKUtil.createWithParents(this.zookeeper, this.peersZNode);
List<ZKUtilOp> listOfOps = new ArrayList<ZKUtilOp>();
ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(this.peersZNode, id),
toByteArray(peerConfig));
- // There is a race (if hbase.zookeeper.useMulti is false)
+ // There is a race (if hbase.zookeeper.useMulti is false)
// b/w PeerWatcher and ReplicationZookeeper#add method to create the
// peer-state znode. This happens while adding a peer
// The peer state data is set as "ENABLED" by default.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index cd19157faeb..ab9a2c20268 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -25,6 +25,8 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
/**
@@ -32,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName;
* It will extract the peerId if it's recovered as well as the dead region servers
* that were part of the queue's history.
*/
+@InterfaceAudience.Private
public class ReplicationQueueInfo {
private static final Log LOG = LogFactory.getLog(ReplicationQueueInfo.class);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
index e8fa4df24ba..fed1791ffec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.replication;
import java.util.List;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* This provides an interface for clients of replication to view replication queues. These queues
* keep track of the WALs that still need to be replicated to remote clusters.
*/
+@InterfaceAudience.Private
public interface ReplicationQueuesClient {
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index fba1fef018a..43262a0d3a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.replication;
import java.util.List;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
+@InterfaceAudience.Private
public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implements
ReplicationQueuesClient {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 3ed51c73a71..635b0212d6b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -27,6 +27,7 @@ import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +61,7 @@ import org.apache.zookeeper.KeeperException;
*
* /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 [VALUE: 254]
*/
+@InterfaceAudience.Private
public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements ReplicationQueues {
/** Znode containing all replication queues for this region server. */
@@ -69,7 +71,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class);
- public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
+ public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
Abortable abortable) {
super(zk, conf, abortable);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
index 2302438ba66..1691b3f3840 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication;
import java.util.List;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -32,6 +33,7 @@ import org.apache.zookeeper.KeeperException;
/**
* This is a base class for maintaining replication state in zookeeper.
*/
+@InterfaceAudience.Private
public abstract class ReplicationStateZKBase {
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
index 1a19cdd0314..f9f2d43cd0e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
@@ -24,6 +24,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Stoppable;
@@ -37,6 +38,7 @@ import org.apache.zookeeper.KeeperException;
* responsible for handling replication events that are defined in the ReplicationListener
* interface.
*/
+@InterfaceAudience.Private
public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements ReplicationTracker {
private static final Log LOG = LogFactory.getLog(ReplicationTrackerZKImpl.class);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 895d0671263..5a31f26eeff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -61,7 +61,7 @@ public class HBaseSaslRpcClient {
private final boolean fallbackAllowed;
/**
* Create a HBaseSaslRpcClient for an authentication method
- *
+ *
* @param method
* the requested authentication method
* @param token
@@ -75,11 +75,11 @@ public class HBaseSaslRpcClient {
public HBaseSaslRpcClient(AuthMethod method,
Token<? extends TokenIdentifier> token, String serverPrincipal, boolean fallbackAllowed)
throws IOException {
- this(method, token, serverPrincipal, fallbackAllowed, "authentication");
+ this(method, token, serverPrincipal, fallbackAllowed, "authentication");
}
/**
* Create a HBaseSaslRpcClient for an authentication method
- *
+ *
* @param method
* the requested authentication method
* @param token
@@ -134,8 +134,8 @@ public class HBaseSaslRpcClient {
throw new IOException("Unable to find SASL client implementation");
}
- protected SaslClient createDigestSaslClient(String[] mechanismNames,
- String saslDefaultRealm, CallbackHandler saslClientCallbackHandler)
+ protected SaslClient createDigestSaslClient(String[] mechanismNames,
+ String saslDefaultRealm, CallbackHandler saslClientCallbackHandler)
throws IOException {
return Sasl.createSaslClient(mechanismNames, null, null, saslDefaultRealm,
SaslUtil.SASL_PROPS, saslClientCallbackHandler);
@@ -143,7 +143,7 @@ public class HBaseSaslRpcClient {
protected SaslClient createKerberosSaslClient(String[] mechanismNames,
String userFirstPart, String userSecondPart) throws IOException {
- return Sasl.createSaslClient(mechanismNames, null, userFirstPart,
+ return Sasl.createSaslClient(mechanismNames, null, userFirstPart,
userSecondPart, SaslUtil.SASL_PROPS, null);
}
@@ -154,16 +154,16 @@ public class HBaseSaslRpcClient {
WritableUtils.readString(inStream));
}
}
-
+
/**
* Do client side SASL authentication with server via the given InputStream
* and OutputStream
- *
+ *
* @param inS
* InputStream to use
* @param outS
* OutputStream to use
- * @return true if connection is set up, or false if needs to switch
+ * @return true if connection is set up, or false if needs to switch
* to simple Auth.
* @throws IOException
*/
@@ -243,7 +243,7 @@ public class HBaseSaslRpcClient {
/**
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has
* been called.
- *
+ *
* @param in
* the InputStream to wrap
* @return a SASL wrapped InputStream
@@ -259,7 +259,7 @@ public class HBaseSaslRpcClient {
/**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has
* been called.
- *
+ *
* @param out
* the OutputStream to wrap
* @return a SASL wrapped OutputStream
@@ -287,6 +287,7 @@ public class HBaseSaslRpcClient {
this.userPassword = SaslUtil.encodePassword(token.getPassword());
}
+ @Override
public void handle(Callback[] callbacks)
throws UnsupportedCallbackException {
NameCallback nc = null;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index 9916a515c7a..7bf5304f716 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -43,6 +43,9 @@ import com.google.common.collect.Maps;
@InterfaceStability.Evolving
public class Permission extends VersionedWritable {
protected static final byte VERSION = 0;
+
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
public enum Action {
READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
@@ -148,6 +151,7 @@ public class Permission extends VersionedWritable {
return result;
}
+ @Override
public String toString() {
StringBuilder str = new StringBuilder("[Permission: ")
.append("actions=");
@@ -167,6 +171,7 @@ public class Permission extends VersionedWritable {
}
/** @return the object version number */
+ @Override
public byte getVersion() {
return VERSION;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
index 87371f7f900..e4758b0a9da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
@@ -354,6 +354,7 @@ public class TablePermission extends Permission {
return result;
}
+ @Override
public String toString() {
StringBuilder str = new StringBuilder("[TablePermission: ");
if(namespace != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
index 7d44ddc2838..73139899c55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
@@ -183,6 +183,7 @@ public class UserPermission extends TablePermission {
return result;
}
+ @Override
public String toString() {
StringBuilder str = new StringBuilder("UserPermission: ")
.append("user=").append(Bytes.toString(user))
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
index 784aec61f10..604a21af4da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
@@ -42,7 +42,7 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier {
protected long issueDate;
protected long expirationDate;
protected long sequenceNumber;
-
+
public AuthenticationTokenIdentifier() {
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java
index 4d87bdf64ef..90dd0a7f258 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.hbase.security.visibility;
import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/*
* This exception indicates that VisibilityController hasn't finished initialization.
*/
@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class VisibilityControllerNotReadyException extends IOException {
private static final long serialVersionUID = 1725986525207989173L;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java
index f6817e7da17..05f3556a2ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotException.java
@@ -18,11 +18,13 @@
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Thrown when a snapshot could not be exported due to an error during the operation.
*/
@InterfaceAudience.Public
+@InterfaceStability.Stable
@SuppressWarnings("serial")
public class ExportSnapshotException extends HBaseSnapshotException {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
new file mode 100644
index 00000000000..ace11ec16f6
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Modifier;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.ClassFinder.And;
+import org.apache.hadoop.hbase.ClassFinder.FileNameFilter;
+import org.apache.hadoop.hbase.ClassFinder.Not;
+import org.apache.hadoop.hbase.ClassTestFinder.TestClassFilter;
+import org.apache.hadoop.hbase.ClassTestFinder.TestFileNameFilter;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test cases for ensuring our client visible classes have annotations
+ * for {@link InterfaceAudience}.
+ *
+ * All classes in the hbase-client and hbase-common modules MUST have InterfaceAudience
+ * annotations, and all InterfaceAudience.Public annotated classes MUST also have
+ * InterfaceStability annotations. Think twice before marking something
+ * InterfaceAudience.Public: make sure it is an interface rather than a class (in most cases),
+ * and that clients will actually depend on it. Once something is marked Public, we cannot
+ * change its signatures within a major release. NOT everything in the hbase-client module,
+ * nor every public Java class, has to be marked InterfaceAudience.Public; ONLY the ones that
+ * an HBase application will use directly (Table, Get, etc., versus ProtobufUtil).
+ *
+ * Also note that HBase has its own annotations in the hbase-annotations module, with the same
+ * names as Hadoop's. You should use the HBase classes.
+ *
+ * See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
+ * and https://issues.apache.org/jira/browse/HBASE-10462.
+ */
+@Category(SmallTests.class)
+public class TestInterfaceAudienceAnnotations {
+
+ private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class);
+
+ /** Selects classes with "generated" in their package name */
+ class GeneratedClassFilter implements ClassFinder.ClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ return c.getPackage().getName().contains("generated");
+ }
+ }
+
+ /** Selects classes with one of the {@link InterfaceAudience} annotations in their class
+ * declaration.
+ */
+ class InterfaceAudienceAnnotatedClassFilter implements ClassFinder.ClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ if (getAnnotation(c) != null) {
+ // class itself has a declared annotation.
+ return true;
+ }
+
+ // If this is an internal class, look for the encapsulating class to see whether it has
+ // annotation. All inner classes of private classes are considered annotated.
+ return isAnnotatedPrivate(c.getEnclosingClass());
+ }
+
+ private boolean isAnnotatedPrivate(Class<?> c) {
+ if (c == null) {
+ return false;
+ }
+
+ Class<?> ann = getAnnotation(c);
+ if (ann != null &&
+ !InterfaceAudience.Public.class.equals(ann)) {
+ return true;
+ }
+
+ return isAnnotatedPrivate(c.getEnclosingClass());
+ }
+
+ protected Class<?> getAnnotation(Class<?> c) {
+ // we should get only declared annotations, not inherited ones
+ Annotation[] anns = c.getDeclaredAnnotations();
+
+ for (Annotation ann : anns) {
+ // Hadoop clearly got it wrong by not making the annotation values (private, public, ...)
+ // an enum; instead we have three independent annotations!
+ Class<?> type = ann.annotationType();
+ if (isInterfaceAudienceClass(type)) {
+ return type;
+ }
+ }
+ return null;
+ }
+ }
+
+ /** Selects classes with one of the {@link InterfaceStability} annotations in their class
+ * declaration.
+ */
+ class InterfaceStabilityAnnotatedClassFilter implements ClassFinder.ClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ if (getAnnotation(c) != null) {
+ // class itself has a declared annotation.
+ return true;
+ }
+ return false;
+ }
+
+ protected Class<?> getAnnotation(Class<?> c) {
+ // we should get only declared annotations, not inherited ones
+ Annotation[] anns = c.getDeclaredAnnotations();
+
+ for (Annotation ann : anns) {
+ // Hadoop clearly got it wrong by not making the annotation values (private, public, ...)
+ // an enum; instead we have three independent annotations!
+ Class<?> type = ann.annotationType();
+ if (isInterfaceStabilityClass(type)) {
+ return type;
+ }
+ }
+ return null;
+ }
+ }
+
+ /** Selects classes with the {@link InterfaceAudience.Public} annotation in their
+ * class declaration.
+ */
+ class InterfaceAudiencePublicAnnotatedClassFilter extends InterfaceAudienceAnnotatedClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ return (InterfaceAudience.Public.class.equals(getAnnotation(c)));
+ }
+ }
+
+ /**
+ * Selects InterfaceAudience or InterfaceStability classes. Don't go meta!!!
+ */
+ class IsInterfaceStabilityClassFilter implements ClassFinder.ClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ return
+ isInterfaceAudienceClass(c) ||
+ isInterfaceStabilityClass(c);
+ }
+ }
+
+ private boolean isInterfaceAudienceClass(Class<?> c) {
+ return
+ c.equals(InterfaceAudience.Public.class) ||
+ c.equals(InterfaceAudience.Private.class) ||
+ c.equals(InterfaceAudience.LimitedPrivate.class);
+ }
+
+ private boolean isInterfaceStabilityClass(Class<?> c) {
+ return
+ c.equals(InterfaceStability.Stable.class) ||
+ c.equals(InterfaceStability.Unstable.class) ||
+ c.equals(InterfaceStability.Evolving.class);
+ }
+
+ /** Selects classes that are declared public */
+ class PublicClassFilter implements ClassFinder.ClassFilter {
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ int mod = c.getModifiers();
+ return Modifier.isPublic(mod);
+ }
+ }
+
+ /** Selects paths (jars and class dirs) only from the main code, not test classes */
+ class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter {
+ @Override
+ public boolean isCandidatePath(String resourcePath, boolean isJar) {
+ return !resourcePath.contains("test-classes") &&
+ !resourcePath.contains("tests.jar");
+ }
+ }
+
+ /**
+ * Checks whether all the classes in client and common modules contain
+ * {@link InterfaceAudience} annotations.
+ */
+ @Test
+ public void testInterfaceAudienceAnnotation()
+ throws ClassNotFoundException, IOException, LinkageError {
+
+ // find classes that are:
+ // In the main jar
+ // AND are public
+ // AND NOT test classes
+ // AND NOT generated classes
+ // AND are NOT annotated with InterfaceAudience
+ ClassFinder classFinder = new ClassFinder(
+ new MainCodeResourcePathFilter(),
+ new Not((FileNameFilter)new TestFileNameFilter()),
+ new And(new PublicClassFilter(),
+ new Not(new TestClassFilter()),
+ new Not(new GeneratedClassFilter()),
+ new Not(new IsInterfaceStabilityClassFilter()),
+ new Not(new InterfaceAudienceAnnotatedClassFilter()))
+ );
+
+ Set<Class<?>> classes = classFinder.findClasses(false);
+
+ LOG.info("These are the classes that DO NOT have @InterfaceAudience annotation:");
+ for (Class<?> clazz : classes) {
+ LOG.info(clazz);
+ }
+
+ Assert.assertEquals("All classes should have @InterfaceAudience annotation",
+ 0, classes.size());
+ }
+
+ /**
+ * Checks whether all the classes in client and common modules that are marked
+ * InterfaceAudience.Public also have {@link InterfaceStability} annotations.
+ */
+ @Test
+ public void testInterfaceStabilityAnnotation()
+ throws ClassNotFoundException, IOException, LinkageError {
+
+ // find classes that are:
+ // In the main jar
+ // AND are public
+ // AND NOT test classes
+ // AND NOT generated classes
+ // AND are annotated with InterfaceAudience.Public
+ // AND NOT annotated with InterfaceStability
+ ClassFinder classFinder = new ClassFinder(
+ new MainCodeResourcePathFilter(),
+ new Not((FileNameFilter)new TestFileNameFilter()),
+ new And(new PublicClassFilter(),
+ new Not(new TestClassFilter()),
+ new Not(new GeneratedClassFilter()),
+ new InterfaceAudiencePublicAnnotatedClassFilter(),
+ new Not(new IsInterfaceStabilityClassFilter()),
+ new Not(new InterfaceStabilityAnnotatedClassFilter()))
+ );
+
+ Set<Class<?>> classes = classFinder.findClasses(false);
+
+ LOG.info("These are the classes that DO NOT have @InterfaceStability annotation:");
+ for (Class<?> clazz : classes) {
+ LOG.info(clazz);
+ }
+
+ Assert.assertEquals("All classes that are marked with @InterfaceAudience.Public should "
+ + "have @InterfaceStability annotation as well",
+ 0, classes.size());
+ }
+}
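Taken together, the two tests above enforce the policy in the class javadoc. A class that passes both checks would look roughly like the following sketch (the class and package names are hypothetical):

    package org.apache.hadoop.hbase.example;

    import org.apache.hadoop.hbase.classification.InterfaceAudience;
    import org.apache.hadoop.hbase.classification.InterfaceStability;

    // Client-visible, so it needs HBase's own audience annotation (not Hadoop's)...
    @InterfaceAudience.Public
    // ...and, being Public, a stability annotation too, or testInterfaceStabilityAnnotation fails.
    @InterfaceStability.Evolving
    public class ExampleClientFacingClass {
    }
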
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
index 8ad8584b372..86b4c32e134 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/BaseConfigurable.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
@@ -26,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
* only sets the configuration through the {@link #setConf(Configuration)}
* method
*/
+@InterfaceAudience.Private
public class BaseConfigurable implements Configurable {
private Configuration conf;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 0356bffed32..60017670861 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -84,6 +84,7 @@ public final class HConstants {
/**
* Status codes used for return values of bulk operations.
*/
+ @InterfaceAudience.Private
public enum OperationStatusCode {
NOT_RUN,
SUCCESS,
@@ -570,6 +571,7 @@ public final class HConstants {
public static final String REGION_IMPL = "hbase.hregion.impl";
/** modifyTable op for replacing the table descriptor */
+ @InterfaceAudience.Private
public static enum Modify {
CLOSE_REGION,
TABLE_COMPACT,
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
index 1033f40b617..5e6f6f72460 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaMutationAnnotation.java
@@ -25,6 +25,8 @@ import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* This annotation may be applied to a field or parameter only when it
* holds mutations for the hbase:meta table.
@@ -32,6 +34,7 @@ import java.lang.annotation.Target;
@Documented
@Target( { ElementType.LOCAL_VARIABLE, ElementType.PARAMETER })
@Retention(RetentionPolicy.CLASS)
+@InterfaceAudience.Private
public @interface MetaMutationAnnotation {
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
index acdaf0866d2..e1ceace36f4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
@@ -161,6 +161,8 @@ public class NamespaceDescriptor {
return new Builder(ns);
}
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
public static class Builder {
private String bName;
private Map<String, String> bConfiguration = new TreeMap<String, String>();
@@ -173,7 +175,7 @@ public class NamespaceDescriptor {
private Builder(String name) {
this.bName = name;
}
-
+
public Builder addConfiguration(Map<String, String> configuration) {
this.bConfiguration.putAll(configuration);
return this;
@@ -193,7 +195,7 @@ public class NamespaceDescriptor {
if (this.bName == null){
throw new IllegalArgumentException("A name has to be specified in a namespace.");
}
-
+
NamespaceDescriptor desc = new NamespaceDescriptor(this.bName);
desc.configuration = this.bConfiguration;
return desc;
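With the Builder now annotated as part of the public API, construction is fluent. A sketch, assuming the create(String) factory that returns a Builder and a key/value overload of addConfiguration alongside the Map overload above; the configuration key is hypothetical:

    NamespaceDescriptor ns = NamespaceDescriptor.create("my_ns")
        .addConfiguration("a.hypothetical.key", "value") // merged into bConfiguration
        .build(); // throws IllegalArgumentException if no name was given
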
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 73525c5a7a2..f658210bdb2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -228,6 +228,8 @@ implements WritableComparable<ImmutableBytesWritable> {
/** A Comparator optimized for ImmutableBytesWritable.
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Stable
public static class Comparator extends WritableComparator {
private BytesWritable.Comparator comparator =
new BytesWritable.Comparator();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
index a9fdd214562..68e3ad40603 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
@@ -26,11 +26,14 @@ import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* Copied from guava source code v15 (LimitedInputStream).
* Guava deprecated LimitInputStream in v14 and removed it in v15. Copying this class here
* allows us to remain compatible with guava 11 to 15+.
*/
+@InterfaceAudience.Private
public final class LimitInputStream extends FilterInputStream {
private long left;
private long mark = -1;
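Usage mirrors Guava's original: wrap any stream to cap how many bytes callers may read. A minimal sketch:

    InputStream raw = new ByteArrayInputStream(new byte[8192]);
    InputStream capped = new LimitInputStream(raw, 4096); // reports EOF after 4096 bytes
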
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 2a3c33791be..3420d0a628f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -56,6 +56,8 @@ public final class Encryption {
/**
* Crypto context
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
public static class Context extends org.apache.hadoop.hbase.io.crypto.Context {
/** The null crypto context */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 369d71eadac..dd6df0ccb0b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.io.hadoopbackport;
import java.io.IOException;
import java.io.InputStream;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* The ThrottledInputStream provides bandwidth throttling on a specified
* InputStream. It is implemented as a wrapper on top of another InputStream
@@ -31,6 +33,7 @@ import java.io.InputStream;
* (Thus, while the read-rate might exceed the maximum for a given short interval,
* the average tends towards the specified maximum, overall.)
*/
+@InterfaceAudience.Private
public class ThrottledInputStream extends InputStream {
private final InputStream rawStream;
@@ -47,7 +50,7 @@ public class ThrottledInputStream extends InputStream {
}
public ThrottledInputStream(InputStream rawStream, long maxBytesPerSec) {
- assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
+ assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
this.rawStream = rawStream;
this.maxBytesPerSec = maxBytesPerSec;
}
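Given the (rawStream, maxBytesPerSec) constructor in the hunk above, throttling a copy is one line. A sketch; the source stream is an assumption:

    InputStream in = new ThrottledInputStream(new FileInputStream("data.part"), 1024 * 1024);
    // reads now sleep as needed so the long-run average stays at or below ~1 MB/s
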
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index f2a16caa7ef..eced0ff4c69 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.util.ReflectionUtils;
/**
* Provide an instance of a user. Allows custom {@link User} creation.
*/
-
@InterfaceAudience.Private
public class UserProvider extends BaseConfigurable {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
index b7fa5746e9a..b2a36d7036f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
@@ -19,8 +19,10 @@
package org.apache.hadoop.hbase.trace;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.htrace.HTraceConfiguration;
+@InterfaceAudience.Private
public class HBaseHTraceConfiguration extends HTraceConfiguration {
public static final String KEY_PREFIX = "hbase.";
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index c4fcec4f98f..eb74ea17e78 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -24,6 +24,7 @@ import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.htrace.SpanReceiver;
import org.htrace.Trace;
@@ -32,6 +33,7 @@ import org.htrace.Trace;
* hbase-site.xml, adding those SpanReceivers to the Tracer, and closing those
* SpanReceivers when appropriate.
*/
+@InterfaceAudience.Private
public class SpanReceiverHost {
public static final String SPAN_RECEIVERS_CONF_KEY = "hbase.trace.spanreceiver.classes";
private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
index ecc9c802bd6..3d545f6800f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/PBType.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.types;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.PositionedByteRange;
@@ -28,6 +30,8 @@ import com.google.protobuf.Message;
* A base-class for {@link DataType} implementations backed by protobuf. See
* {@code PBKeyValue} in {@code hbase-examples} module.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public abstract class PBType<T extends Message> implements DataType<T> {
@Override
public boolean isOrderPreserving() {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
index c5aebab599a..cd416581395 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java
@@ -17,9 +17,14 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* An abstract implementation of the ByteRange API
*/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
public abstract class AbstractByteRange implements ByteRange {
public static final int UNSET_HASH_VALUE = -1;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 26bcf9711cb..77f71430098 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.util;
import java.nio.ByteBuffer;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
import com.google.common.annotations.VisibleForTesting;
/**
@@ -28,6 +31,8 @@ import com.google.common.annotations.VisibleForTesting;
* {@link #compareTo(ByteRange)}, {@link #hashCode()}, or
* {@link #equals(Object)}. {@code Position} is retained by copy operations.
*/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
PositionedByteRange {
/**
@@ -74,7 +79,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
/**
* Update the beginning of this range. {@code offset + length} may not be
* greater than {@code bytes.length}. Resets {@code position} to 0.
- *
+ *
* @param offset
* the new start of this range.
* @return this.
@@ -90,7 +95,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
* Update the length of this range. {@code offset + length} should not be
* greater than {@code bytes.length}. If {@code position} is greater than the
* new {@code length}, sets {@code position} to {@code length}.
- *
+ *
* @param length
* The new length of this range.
* @return this.
@@ -153,28 +158,28 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
@Override
public abstract PositionedByteRange put(byte[] val, int offset, int length);
-
+
@Override
- public abstract PositionedByteRange putInt(int index, int val);
+ public abstract PositionedByteRange putInt(int index, int val);
@Override
public abstract PositionedByteRange putLong(int index, long val);
-
+
@Override
public abstract PositionedByteRange putShort(int index, short val);
-
+
@Override
public abstract PositionedByteRange putInt(int val);
-
+
@Override
public abstract PositionedByteRange putLong(long val);
-
+
@Override
public abstract PositionedByteRange putShort(short val);
-
+
@Override
public abstract int putVLong(int index, long val);
-
+
@Override
public abstract int putVLong(long val);
/**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
index 76e2549afdc..d1f4f208ec8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java
@@ -1456,6 +1456,8 @@ public class Base64 {
* @see Base64
* @since 1.3
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Stable
public static class Base64OutputStream extends FilterOutputStream {
private boolean encode;
private int position;
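With Base64OutputStream now Public/Stable, clients can stream-encode directly. A sketch, assuming the single-argument constructor defaults to encode mode:

    OutputStream b64 = new Base64.Base64OutputStream(new FileOutputStream("out.b64"));
    b64.write("hello".getBytes());
    b64.close(); // flushes any buffered partial quantum and padding
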
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
index d89d3379424..c3fa547b327 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/BoundedCompletionService.java
@@ -30,12 +30,15 @@ import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* A completion service, close to the one available in JDK 1.7.
* However, this one keeps the list of futures, and allows cancelling them all.
* This also means that it should be used for a small set of tasks only.
*
* Implementation is not thread safe.
*/
+@InterfaceAudience.Private
public class BoundedCompletionService<V> {
private final Executor executor;
private final List<Future<V>> tasks; // all the tasks
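A sketch of the intended life cycle, assuming an (executor, capacity) constructor and the submit/take/cancelAll trio the javadoc implies:

    ExecutorService pool = Executors.newFixedThreadPool(2);
    BoundedCompletionService<Long> cs = new BoundedCompletionService<Long>(pool, 4);
    cs.submit(new Callable<Long>() {
      @Override
      public Long call() { return 42L; }
    });
    Long first = cs.take().get(); // block until the first task completes
    cs.cancelAll(true);           // then cancel everything still pending
    pool.shutdown();
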
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
index cc09c3ae3e4..414832dd23d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumFactory.java
@@ -23,12 +23,15 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.zip.Checksum;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* Utility class that is used to generate a Checksum object.
* The Checksum implementation is pluggable, and an application
* can specify its own class that implements its own
* Checksum algorithm.
*/
+@InterfaceAudience.Private
public class ChecksumFactory {
static private final Class<?>[] EMPTY_ARRAY = new Class[]{};
@@ -51,7 +54,7 @@ public class ChecksumFactory {
* @param className classname for which an constructor is created
* @return a new Constructor object
*/
- static public Constructor<?> newConstructor(String className)
+ static public Constructor<?> newConstructor(String className)
throws IOException {
try {
Class<?> clazz = getClassByName(className);
@@ -88,7 +91,7 @@ public class ChecksumFactory {
* @return the class object.
* @throws ClassNotFoundException if the class is not found.
*/
- static private Class<?> getClassByName(String name)
+ static private Class<?> getClassByName(String name)
throws ClassNotFoundException {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
return Class.forName(name, true, classLoader);
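End to end, the reflection path above reads as follows; a sketch using the JDK's own CRC32 (exception handling elided):

    Constructor<?> ctor = ChecksumFactory.newConstructor("java.util.zip.CRC32");
    Checksum crc = (Checksum) ctor.newInstance(); // zero-arg, matching EMPTY_ARRAY
    byte[] data = "abc".getBytes();
    crc.update(data, 0, data.length);
    long checksum = crc.getValue();
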
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index 86af5e77a8f..95df7693590 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -24,13 +24,15 @@ import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Checksum types. The Checksum type is a one byte number
* that stores a representation of the checksum algorithm
- * used to encode a hfile. The ordinal of these cannot
+ * used to encode a hfile. The ordinal of these cannot
* change or else you risk breaking all existing HFiles out there.
*/
+@InterfaceAudience.Private
public enum ChecksumType {
NULL((byte)0) {
@@ -70,7 +72,7 @@ public enum ChecksumType {
LOG.trace(PURECRC32 + " not available.");
}
try {
- // The default checksum class name is java.util.zip.CRC32.
+ // The default checksum class name is java.util.zip.CRC32.
// This is available on all JVMs.
if (ctor == null) {
ctor = ChecksumFactory.newConstructor(JDKCRC);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
index 2180b4d3927..0f00132dec0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
@@ -24,12 +24,15 @@ import java.util.Collection;
import java.util.List;
import java.util.NoSuchElementException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* A collection class that contains multiple sub-lists, which allows us to not copy lists.
* This class does not support modification. The derived classes that add modifications are
* not thread-safe.
* NOTE: Doesn't implement List as it is not necessary for current usage; feel free to add.
*/
+@InterfaceAudience.Private
public class ConcatenatedLists<T> implements Collection<T> {
protected final ArrayList<List<T>> components = new ArrayList<List<T>>();
protected int size = 0;
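A sketch of the no-copy behaviour, assuming an addSublist(List) mutator on the class:

    ConcatenatedLists<Integer> all = new ConcatenatedLists<Integer>();
    all.addSublist(Arrays.asList(1, 2)); // the sub-list is referenced, not copied
    all.addSublist(Arrays.asList(3));
    for (int i : all) {
      System.out.println(i); // iterates 1, 2, 3 across both sub-lists
    }
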
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
index d56055a668d..688b51a02a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ExceptionUtil.java
@@ -22,6 +22,8 @@ import java.io.InterruptedIOException;
import java.net.SocketTimeoutException;
import java.nio.channels.ClosedByInterruptException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
/**
* This class handles the different interruption classes.
* It can be:
@@ -31,6 +33,7 @@ import java.nio.channels.ClosedByInterruptException;
* - SocketTimeoutException inherits InterruptedIOException but is not a real
* interruption, so we have to distinguish the case. This pattern is unfortunately common.
*/
+@InterfaceAudience.Private
public class ExceptionUtil {
/**
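The distinctions above usually surface through a single predicate; a sketch assuming an isInterrupt(Throwable) helper on this class:

    try {
      // ... some blocking I/O ...
    } catch (IOException e) {
      if (ExceptionUtil.isInterrupt(e)) {   // true for InterruptedIOException and friends,
        Thread.currentThread().interrupt(); // but false for SocketTimeoutException
      }
    }
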
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
index cb96f3ea156..89014dbdd00 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java
@@ -18,6 +18,9 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* This is a very fast, non-cryptographic hash suitable for general hash-based
* lookup. See http://code.google.com/p/smhasher/wiki/MurmurHash3 for details.
@@ -25,6 +28,8 @@ package org.apache.hadoop.hbase.util;
* MurmurHash3 is the successor to MurmurHash2. It comes in 3 variants, and
* the 32-bit version targets low latency for hash table use.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
public class MurmurHash3 extends Hash {
private static MurmurHash3 _instance = new MurmurHash3();
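Now that the class is Public/Stable, callers reach it through the Hash API; a sketch assuming a getInstance() accessor for the singleton above and the (bytes, offset, length, seed) entry point inherited from Hash:

    byte[] key = "row-key".getBytes();
    int h = MurmurHash3.getInstance().hash(key, 0, key.length, 0); // seed 0
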
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index 0e50dfbd0b0..8c8f6188f61 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.util;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
public class PrettyPrinter {
public enum Unit {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
index 6f508e29a93..c14f1e25680 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReadOnlyByteRangeException.java
@@ -17,9 +17,14 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* Exception thrown when a read only byte range is modified
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class ReadOnlyByteRangeException extends UnsupportedOperationException {
public ReadOnlyByteRangeException() {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
index db3ca0fdae6..4d5e5b5e88e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleByteRange.java
@@ -17,13 +17,18 @@
*/
package org.apache.hadoop.hbase.util;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
/**
* A read only version of the {@link ByteRange}.
*/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
public class SimpleByteRange extends AbstractByteRange {
public SimpleByteRange() {
}
-
+
public SimpleByteRange(int capacity) {
this(new byte[capacity]);
}
@@ -110,7 +115,7 @@ public class SimpleByteRange extends AbstractByteRange {
}
return clone;
}
-
+
@Override
public ByteRange shallowCopySubRange(int innerOffset, int copyLength) {
SimpleByteRange clone = new SimpleByteRange(bytes, offset + innerOffset,
@@ -120,7 +125,7 @@ public class SimpleByteRange extends AbstractByteRange {
}
return clone;
}
-
+
@Override
public boolean equals(Object thatObject) {
if (thatObject == null){
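Together with ReadOnlyByteRangeException above, the contract is: reads work, writes throw. A sketch:

    ByteRange r = new SimpleByteRange("immutable".getBytes());
    byte first = r.get(0);  // reads are supported
    r.put(0, (byte) 0x00);  // expected to throw ReadOnlyByteRangeException
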
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
index c15ace9607a..d46537c1dd1 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
@@ -62,6 +62,43 @@ public class ClassFinder {
boolean isCandidateClass(Class<?> c);
};
+ public static class Not implements ResourcePathFilter, FileNameFilter, ClassFilter {
+ private ResourcePathFilter resourcePathFilter;
+ private FileNameFilter fileNameFilter;
+ private ClassFilter classFilter;
+
+ public Not(ResourcePathFilter resourcePathFilter){this.resourcePathFilter = resourcePathFilter;}
+ public Not(FileNameFilter fileNameFilter){this.fileNameFilter = fileNameFilter;}
+ public Not(ClassFilter classFilter){this.classFilter = classFilter;}
+
+ @Override
+ public boolean isCandidatePath(String resourcePath, boolean isJar) {
+ return !resourcePathFilter.isCandidatePath(resourcePath, isJar);
+ }
+ @Override
+ public boolean isCandidateFile(String fileName, String absFilePath) {
+ return !fileNameFilter.isCandidateFile(fileName, absFilePath);
+ }
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ return !classFilter.isCandidateClass(c);
+ }
+ }
+
+ public static class And implements ClassFilter {
+ ClassFilter[] classFilters;
+ public And(ClassFilter...classFilters) { this.classFilters = classFilters; }
+ @Override
+ public boolean isCandidateClass(Class<?> c) {
+ for (ClassFilter filter : classFilters) {
+ if (!filter.isCandidateClass(c)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+
public ClassFinder() {
this(null, null, null);
}
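The combinators compose exactly as the tests above use them; a sketch (the filter names are borrowed from TestInterfaceAudienceAnnotations, so treat them as placeholders here):

    ClassFinder.ClassFilter filter = new ClassFinder.And(
        new ClassFinder.Not(new GeneratedClassFilter()), // reject generated code
        new ClassFinder.Not(new TestClassFilter()));     // reject test classes
    boolean candidate = filter.isCandidateClass(String.class);
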
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 297a7af109b..7787c52b43f 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -103,6 +103,10 @@
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-annotations</artifactId>
+ </dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
index ab354bcddeb..5b10b830008 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import com.google.protobuf.ByteString;
import com.google.protobuf.HBaseZeroCopyByteString;
@@ -26,6 +27,7 @@ import com.google.protobuf.HBaseZeroCopyByteString;
/**
* Hack to work around the HBASE-1304 issue that keeps bubbling up when in a mapreduce context.
*/
+@InterfaceAudience.Private
public class ByteStringer {
private static final Log LOG = LogFactory.getLog(ByteStringer.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
index f34b83dc144..0e03a423fc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.Checksum;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.util.ChecksumType;
/**
* Utility methods to compute and validate checksums.
*/
+@InterfaceAudience.Private
public class ChecksumUtil {
/** This is used to reserve space in a byte buffer */