HBASE-10671 Add missing InterfaceAudience annotations for classes in hbase-common and hbase-client modules
parent 890c067b66
commit 882324dbcc
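The annotations this commit adds are HBase's own copies of Hadoop's audience and stability markers: @InterfaceAudience declares who may depend on a class (Public, LimitedPrivate, or Private), and @InterfaceStability declares how freely its API may still change (Stable, Evolving, or Unstable). A minimal sketch of the pattern applied throughout the diff below (the class name is hypothetical):

```java
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

// Public: external client code may rely on this class.
// Evolving: its API may still change between minor releases.
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExampleClientApi {
  // The annotations carry no runtime behavior; build and doc tooling read them.
}
```

Several hunks also switch imports from org.apache.hadoop.classification to org.apache.hadoop.hbase.classification, HBase's relocated copy of the same annotations. File headers were lost in extraction; the hunks below appear in commit order, one file per block.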
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.classification.tools;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import com.sun.javadoc.DocErrorReporter;
 import com.sun.javadoc.LanguageVersion;
 import com.sun.javadoc.RootDoc;
@@ -29,6 +30,7 @@ import com.sun.tools.doclets.standard.Standard;
  * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}.
  * It delegates to the Standard Doclet, and takes the same options.
  */
+@InterfaceAudience.Private
 public class ExcludePrivateAnnotationsStandardDoclet {
 
   public static LanguageVersion languageVersion() {
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.classification.tools;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 import com.sun.javadoc.DocErrorReporter;
 import com.sun.javadoc.LanguageVersion;
 import com.sun.javadoc.RootDoc;
@@ -33,6 +35,7 @@ import com.sun.tools.doclets.standard.Standard;
  * are also excluded.
  * It delegates to the Standard Doclet, and takes the same options.
  */
+@InterfaceAudience.Private
 public class IncludePublicAnnotationsStandardDoclet {
 
   public static LanguageVersion languageVersion() {
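The two doclets above filter generated API docs by these audience annotations and are selected through the standard -doclet/-docletpath javadoc options. A sketch of the delegation pattern their javadoc describes (method bodies are assumptions, not taken from the commit):

```java
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
import com.sun.tools.doclets.standard.Standard;

public class DelegatingDocletSketch {
  public static LanguageVersion languageVersion() {
    return LanguageVersion.JAVA_1_5; // advertise generics support, like the Standard Doclet
  }

  public static boolean start(RootDoc root) {
    // A real implementation would first filter root's classes and members by
    // their audience annotations, then hand the pruned result off unchanged.
    return Standard.start(root);
  }
}
```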
@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hbase;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * Ways to keep cells marked for delete around.
  */
@@ -25,6 +28,8 @@ package org.apache.hadoop.hbase;
  * Don't change the TRUE/FALSE labels below, these have to be called
  * this way for backwards compatibility.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public enum KeepDeletedCells {
   /** Deleted Cells are not retained. */
   FALSE,
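With KeepDeletedCells now marked Public, client code can reference the enum when configuring a column family. A sketch, assuming the HColumnDescriptor setter of the same vintage that accepts this enum (the family name is hypothetical):

```java
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;

public class KeepDeletedCellsExample {
  static HColumnDescriptor familyKeepingDeletes() {
    HColumnDescriptor family = new HColumnDescriptor("cf");
    // TRUE is one of the backwards-compatible labels the javadoc warns not to rename;
    // FALSE, shown in the hunk above, is the default (deleted cells are not retained).
    family.setKeepDeletedCells(KeepDeletedCells.TRUE); // assumed setter signature
    return family;
  }
}
```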
@@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -90,6 +91,7 @@ import com.google.common.annotations.VisibleForTesting;
  * gets as well.
  * </p>
  */
+@InterfaceAudience.Private
 class AsyncProcess {
   protected static final Log LOG = LogFactory.getLog(AsyncProcess.class);
   protected static final AtomicLong COUNTER = new AtomicLong();
@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
 public class DelegatingRetryingCallable<T, D extends RetryingCallable<T>> implements
     RetryingCallable<T> {
   protected final D delegate;
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Keeps track of repeated failures to any region server. Multiple threads manipulate the contents
@@ -21,6 +21,9 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * Used to communicate with a single HBase table.
  * Obtain an instance from an {@link HConnection}.
@@ -29,6 +32,8 @@ import java.util.List;
  * @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead
  */
 @Deprecated
+@InterfaceAudience.Private
+@InterfaceStability.Stable
 public interface HTableInterface extends Table {
 
   /**
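The deprecation note points client code at Table. A migration sketch, assuming the Connection/ConnectionFactory API that replaced HConnection in the same development cycle (not part of this commit; the table name is hypothetical):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class TableMigrationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Old style obtained an HTableInterface from an HConnection; both are deprecated.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("mytable"))) {
      // issue Gets/Puts/Scans against 'table' as before
    }
  }
}
```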
@@ -57,20 +57,20 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * Each put will be sharded into different buffer queues based on its destination region server.
  * So each region server buffer queue will only have the puts which share the same destination.
  * And each queue will have a flush worker thread to flush the puts request to the region server.
  * If any queue is full, the HTableMultiplexer starts to drop the Put requests for that
  * particular queue.
  *
  * Also all the puts will be retried as a configuration number before dropping.
  * And the HTableMultiplexer can report the number of buffered requests and the number of the
  * failed (dropped) requests in total or on per region server basis.
  *
  * This class is thread safe.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HTableMultiplexer {
   private static final Log LOG = LogFactory.getLog(HTableMultiplexer.class.getName());
 
   public static final String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS =
       "hbase.tablemultiplexer.flush.period.ms";
   public static final String TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads";
@@ -89,7 +89,7 @@ public class HTableMultiplexer {
   private final int maxKeyValueSize;
   private final ScheduledExecutorService executor;
   private final long flushPeriod;
 
   /**
    * @param conf The HBaseConfiguration
    * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
@@ -128,7 +128,7 @@ public class HTableMultiplexer {
   }
 
   /**
    * The puts request will be buffered by their corresponding buffer queue.
    * Return the list of puts which could not be queued.
    * @param tableName
    * @param puts
@@ -138,13 +138,13 @@ public class HTableMultiplexer {
   public List<Put> put(TableName tableName, final List<Put> puts) {
     if (puts == null)
       return null;
 
     List <Put> failedPuts = null;
     boolean result;
     for (Put put : puts) {
       result = put(tableName, put, this.retryNum);
       if (result == false) {
 
         // Create the failed puts list if necessary
         if (failedPuts == null) {
           failedPuts = new ArrayList<Put>();
@@ -163,7 +163,7 @@ public class HTableMultiplexer {
   public List<Put> put(byte[] tableName, final List<Put> puts) {
     return put(TableName.valueOf(tableName), puts);
   }
 
   /**
    * The put request will be buffered by its corresponding buffer queue. And the put request will be
    * retried before dropping the request.
@@ -185,7 +185,7 @@ public class HTableMultiplexer {
 
         // Generate a MultiPutStatus object and offer it into the queue
         PutStatus s = new PutStatus(loc.getRegionInfo(), put, retry);
 
         return queue.offer(s);
       }
     } catch (IOException e) {
@@ -209,7 +209,7 @@ public class HTableMultiplexer {
   public boolean put(final byte[] tableName, Put put) {
     return put(TableName.valueOf(tableName), put);
   }
 
   /**
    * @return the current HTableMultiplexerStatus
    */
@@ -239,6 +239,8 @@ public class HTableMultiplexer {
    * report the number of buffered requests and the number of the failed (dropped) requests
    * in total or on per region server basis.
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public static class HTableMultiplexerStatus {
     private long totalFailedPutCounter;
     private long totalBufferedPutCounter;
@@ -339,7 +341,7 @@ public class HTableMultiplexer {
       return this.serverToAverageLatencyMap;
     }
   }
 
   private static class PutStatus {
     public final HRegionInfo regionInfo;
     public final Put put;
@@ -406,7 +408,7 @@ public class HTableMultiplexer {
     private final ScheduledExecutorService executor;
     private final int maxRetryInQueue;
     private final AtomicInteger retryInQueue = new AtomicInteger(0);
 
     public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr,
         HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize,
         ExecutorService pool, ScheduledExecutorService executor) {
@@ -443,7 +445,7 @@ public class HTableMultiplexer {
     private boolean resubmitFailedPut(PutStatus ps, HRegionLocation oldLoc) throws IOException {
       // Decrease the retry count
       final int retryCount = ps.retryCount - 1;
 
       if (retryCount <= 0) {
         // Update the failed counter and no retry any more.
         return false;
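A sketch of how a client drives this API, using only the constructor and put signatures visible in the hunks above (the table name, column layout, and queue size are hypothetical; Put.add is the pre-1.0 mutation setter):

```java
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiplexerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Second argument bounds each per-region-server buffer queue, per the constructor javadoc.
    HTableMultiplexer multiplexer = new HTableMultiplexer(conf, 10000);

    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));

    // Single put: returns false if the destination queue is full and the put was dropped.
    boolean queued = multiplexer.put(TableName.valueOf("mytable"), put);

    // Batch form: returns the puts that could not be queued (null when all were accepted).
    List<Put> failed = multiplexer.put(TableName.valueOf("mytable"), Arrays.asList(put));
  }
}
```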
@@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD",
   justification="stub used by ipc")
+@InterfaceAudience.Private
 public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<T> {
 
   protected final ClusterConnection connection;
@@ -41,6 +41,7 @@ public class RetriesExhaustedException extends IOException {
   /**
    * Datastructure that allows adding more info around Throwable incident.
    */
+  @InterfaceAudience.Private
   public static class ThrowableWithExtraContext {
     private final Throwable t;
     private final long when;
@@ -52,7 +53,7 @@ public class RetriesExhaustedException extends IOException {
       this.when = when;
       this.extras = extras;
     }
 
     @Override
     public String toString() {
       return new Date(this.when).toString() + ", " + extras + ", " + t.toString();
@@ -76,6 +77,7 @@ public class RetriesExhaustedException extends IOException {
    * @param numTries
    * @param exceptions List of exceptions that failed before giving up
    */
+  @InterfaceAudience.Private
   public RetriesExhaustedException(final int numTries,
       final List<ThrowableWithExtraContext> exceptions) {
     super(getMessage(numTries, exceptions),
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.hbase.RegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Gets or Scans throw this exception if running without in-row scan flag
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  * hbase.table.max.rowsize).
  */
 @InterfaceAudience.Public
+@InterfaceStability.Stable
 public class RowTooBigException extends RegionException {
 
   public RowTooBigException(String message) {
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
@@ -24,6 +25,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 /**
  * Factory to create an {@link RpcRetryingCaller}
  */
+@InterfaceAudience.Private
 public class RpcRetryingCallerFactory {
 
   /** Configuration key for a custom {@link RpcRetryingCaller} */
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -51,6 +52,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import com.google.protobuf.ServiceException;
+
 import org.htrace.Trace;
 
 /**
@@ -59,6 +61,7 @@ import org.htrace.Trace;
  * the first answer. If the answer comes from one of the secondary replica, it will
  * be marked as stale.
  */
+@InterfaceAudience.Private
 public class RpcRetryingCallerWithReadReplicas {
   static final Log LOG = LogFactory.getLog(RpcRetryingCallerWithReadReplicas.class);
 
@@ -47,6 +47,8 @@ public abstract class Batch {
    * {@link Batch.Call#call(Object)}
    * @param <R> the return type from {@link Batch.Call#call(Object)}
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public interface Call<T,R> {
     R call(T instance) throws IOException;
   }
@@ -65,6 +67,8 @@ public abstract class Batch {
    * @param <R> the return type from the associated {@link Batch.Call#call(Object)}
    * @see org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public interface Callback<R> {
     void update(byte[] region, byte[] row, R result);
   }
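Batch.Call is the per-region unit of work handed to coprocessorService, and Batch.Callback receives each region's result. A sketch using only those two interfaces from the hunk above; ExampleService is a hypothetical stand-in for a protobuf-generated service stub:

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.client.coprocessor.Batch;

public class BatchSketch {
  // Hypothetical stand-in; real code would use a protobuf-generated service interface.
  interface ExampleService {
    Long count() throws IOException;
  }

  // Invoked once per region against that region's service stub.
  static Batch.Call<ExampleService, Long> countingCall() {
    return new Batch.Call<ExampleService, Long>() {
      @Override
      public Long call(ExampleService instance) throws IOException {
        return instance.count();
      }
    };
  }

  // Receives each region's result as it arrives; here it just sums them.
  static Batch.Callback<Long> summingCallback(final AtomicLong total) {
    return new Batch.Callback<Long>() {
      @Override
      public void update(byte[] region, byte[] row, Long result) {
        total.addAndGet(result);
      }
    };
  }
}
```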
@@ -38,8 +38,8 @@ package org.apache.hadoop.hbase.exceptions;
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Thrown when the client believes that we are trying to communicate to has
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hbase.exceptions;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * Exception thrown if a mutation fails sanity checks.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRetryIOException {
 
   private static final long serialVersionUID = 1788783640409186240L;
@@ -19,8 +19,15 @@
  */
 package org.apache.hadoop.hbase.exceptions;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 
+/**
+ * Thrown when there is a timeout when trying to acquire a lock
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class LockTimeoutException extends DoNotRetryIOException {
 
   private static final long serialVersionUID = -1770764924258999825L;
@@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.exceptions;
 
 import java.net.ConnectException;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
@@ -19,10 +19,15 @@
 
 package org.apache.hadoop.hbase.exceptions;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * An error requesting an RPC protocol that the server is not serving.
  */
 @SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class UnknownProtocolException extends org.apache.hadoop.hbase.DoNotRetryIOException {
   private Class<?> protocol;
 
@@ -19,6 +19,9 @@
 package org.apache.hadoop.hbase.filter;
 
 import com.google.protobuf.InvalidProtocolBufferException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -26,7 +29,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * A long comparator which numerical compares against the specified byte array
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class LongComparator extends ByteArrayComparable {
   private Long longValue;
 
@@ -44,6 +48,7 @@ public class LongComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.LongComparator.Builder builder =
       ComparatorProtos.LongComparator.newBuilder();
@@ -79,6 +79,8 @@ public class RegexStringComparator extends ByteArrayComparable {
   private Engine engine;
 
   /** Engine implementation type (default=JAVA) */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public enum EngineType {
     JAVA,
     JONI
@@ -153,6 +155,7 @@ public class RegexStringComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     return engine.toByteArray();
   }
@@ -175,7 +178,7 @@ public class RegexStringComparator extends ByteArrayComparable {
     if (proto.hasEngine()) {
       EngineType engine = EngineType.valueOf(proto.getEngine());
       comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags(),
         engine);
     } else {
       comparator = new RegexStringComparator(proto.getPattern(), proto.getPatternFlags());
     }
@@ -195,6 +198,7 @@ public class RegexStringComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof RegexStringComparator)) return false;
@@ -212,7 +216,7 @@ public class RegexStringComparator extends ByteArrayComparable {
 
   /**
    * This is an internal interface for abstracting access to different regular
    * expression matching engines.
    */
   static interface Engine {
     /**
@@ -220,7 +224,7 @@ public class RegexStringComparator extends ByteArrayComparable {
      * for matching
      */
     String getPattern();
 
     /**
      * Returns the set of configured match flags, a bit mask that may include
      * {@link Pattern} flags
@@ -412,7 +416,7 @@ public class RegexStringComparator extends ByteArrayComparable {
           encoding = e.getEncoding();
         } else {
           throw new IllegalCharsetNameException(name);
         }
       }
     }
   }
@@ -19,11 +19,13 @@ package org.apache.hadoop.hbase.ipc;
 
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Simple delegating controller for use with the {@link RpcControllerFactory} to help override
  * standard behavior of a {@link PayloadCarryingRpcController}.
  */
+@InterfaceAudience.Private
 public class DelegatingPayloadCarryingRpcController extends PayloadCarryingRpcController {
   private PayloadCarryingRpcController delegate;
 
@@ -27,6 +27,7 @@ import java.nio.ByteBuffer;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScanner;
@@ -49,6 +50,7 @@ import com.google.protobuf.Message;
 /**
  * Utility to help ipc'ing.
  */
+@InterfaceAudience.Private
 class IPCUtil {
   public static final Log LOG = LogFactory.getLog(IPCUtil.class);
   /**
@@ -22,11 +22,13 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 
 /**
  * Factory to create a {@link PayloadCarryingRpcController}
 */
+@InterfaceAudience.Private
 public class RpcControllerFactory {
 
   public static final String CUSTOM_CONTROLLER_CONF_KEY = "hbase.rpc.controllerfactory.class";
@@ -39,7 +41,7 @@ public class RpcControllerFactory {
   public PayloadCarryingRpcController newController() {
     return new PayloadCarryingRpcController();
   }
 
   public PayloadCarryingRpcController newController(final CellScanner cellScanner) {
     return new PayloadCarryingRpcController(cellScanner);
   }
@@ -47,7 +49,7 @@ public class RpcControllerFactory {
   public PayloadCarryingRpcController newController(final List<CellScannable> cellIterables) {
     return new PayloadCarryingRpcController(cellIterables);
   }
 
 
   public static RpcControllerFactory instantiate(Configuration configuration) {
     String rpcControllerFactoryClazz =
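The factory is pluggable through the configuration key shown above. A sketch of selecting and using a controller factory, built only from the members visible in this hunk (a deployment would normally name a custom subclass, here hypothetical, instead of the base class):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

public class ControllerFactoryExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "hbase.rpc.controllerfactory.class"; a hypothetical MyRpcControllerFactory
    // subclass could be named here instead of the default.
    conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
        RpcControllerFactory.class.getName());
    RpcControllerFactory factory = RpcControllerFactory.instantiate(conf);
    PayloadCarryingRpcController controller = factory.newController();
  }
}
```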
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.ipc;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.protobuf.Descriptors;
@@ -51,6 +52,7 @@ import com.google.protobuf.Service;
  * </code>
  * </p>
  */
+@InterfaceAudience.Private
 public class ServerRpcController implements RpcController {
   /**
    * The exception thrown within
@@ -19,11 +19,15 @@
 package org.apache.hadoop.hbase.ipc;
 
 
+
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 
+@InterfaceAudience.Private
 public class TimeLimitedRpcController implements RpcController {
 
   /**
@@ -37,6 +37,7 @@ import java.util.Map.Entry;
 import java.util.NavigableSet;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -159,6 +160,7 @@ import com.google.protobuf.TextFormat;
  */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED",
   justification="None. Address sometime.")
+@InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class
 public final class ProtobufUtil {
 
   private ProtobufUtil() {
@@ -19,10 +19,12 @@
 package org.apache.hadoop.hbase.quotas;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Generic quota exceeded exception for invalid settings
 */
+@InterfaceAudience.Private
 public class InvalidQuotaSettingsException extends DoNotRetryIOException {
   public InvalidQuotaSettingsException(String msg) {
     super(msg);
@@ -19,10 +19,14 @@
 package org.apache.hadoop.hbase.quotas;
 
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Generic quota exceeded exception
 */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class QuotaExceededException extends DoNotRetryIOException {
   public QuotaExceededException(String msg) {
     super(msg);
@@ -23,6 +23,8 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Describe the throttling result.
@@ -31,11 +33,15 @@ import org.apache.commons.logging.LogFactory;
  * operation to go on the server if the waitInterval is grater than the one got
  * as result of this exception.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class ThrottlingException extends QuotaExceededException {
   private static final long serialVersionUID = 1406576492085155743L;
 
   private static final Log LOG = LogFactory.getLog(ThrottlingException.class);
 
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public enum Type {
     NumRequestsExceeded,
     NumReadRequestsExceeded,
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
 
 /**
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.exceptions.HBaseException;
  * store, loss of connection to a peer cluster or errors during deserialization of replication data.
  */
 @InterfaceAudience.Public
+@InterfaceStability.Stable
 public class ReplicationException extends HBaseException {
 
   private static final long serialVersionUID = -8885598603988198062L;
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 /**
  * A factory class for instantiating replication objects that deal with replication state.
 */
+@InterfaceAudience.Private
 public class ReplicationFactory {
 
   public static ReplicationQueues getReplicationQueues(final ZooKeeperWatcher zk,
@@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -75,6 +76,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  *
  * /hbase/replication/peers/1/tableCFs [Value: "table1; table2:cf1,cf3; table3:cfx,cfy"]
  */
+@InterfaceAudience.Private
 public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements ReplicationPeers {
 
   // Map of peer clusters keyed by their id
@@ -110,16 +112,16 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       throw new IllegalArgumentException("Cannot add a peer with id=" + id
         + " because that id already exists.");
     }
 
     if(id.contains("-")){
       throw new IllegalArgumentException("Found invalid peer name:" + id);
     }
 
     ZKUtil.createWithParents(this.zookeeper, this.peersZNode);
     List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
     ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(this.peersZNode, id),
       toByteArray(peerConfig));
     // There is a race (if hbase.zookeeper.useMulti is false)
     // b/w PeerWatcher and ReplicationZookeeper#add method to create the
     // peer-state znode. This happens while adding a peer
     // The peer state data is set as "ENABLED" by default.
@@ -25,6 +25,8 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
@@ -32,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName;
  * It will extract the peerId if it's recovered as well as the dead region servers
  * that were part of the queue's history.
 */
+@InterfaceAudience.Private
 public class ReplicationQueueInfo {
   private static final Log LOG = LogFactory.getLog(ReplicationQueueInfo.class);
 
@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.replication;
 
 import java.util.List;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 
 /**
  * This provides an interface for clients of replication to view replication queues. These queues
  * keep track of the WALs that still need to be replicated to remote clusters.
 */
+@InterfaceAudience.Private
 public interface ReplicationQueuesClient {
 
   /**
@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.replication;
 
 import java.util.List;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
+@InterfaceAudience.Private
 public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implements
     ReplicationQueuesClient {
 
@@ -27,6 +27,7 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
@@ -60,6 +61,7 @@ import org.apache.zookeeper.KeeperException;
  *
  * /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 [VALUE: 254]
  */
+@InterfaceAudience.Private
 public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements ReplicationQueues {
 
   /** Znode containing all replication queues for this region server. */
@@ -69,7 +71,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class);
 
   public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
       Abortable abortable) {
     super(zk, conf, abortable);
   }
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication;
 
 import java.util.List;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -32,6 +33,7 @@ import org.apache.zookeeper.KeeperException;
 /**
  * This is a base class for maintaining replication state in zookeeper.
 */
+@InterfaceAudience.Private
 public abstract class ReplicationStateZKBase {
 
   /**
@@ -24,6 +24,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
@@ -37,6 +38,7 @@ import org.apache.zookeeper.KeeperException;
  * responsible for handling replication events that are defined in the ReplicationListener
  * interface.
 */
+@InterfaceAudience.Private
 public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements ReplicationTracker {
 
   private static final Log LOG = LogFactory.getLog(ReplicationTrackerZKImpl.class);
@ -61,7 +61,7 @@ public class HBaseSaslRpcClient {
|
||||||
private final boolean fallbackAllowed;
|
private final boolean fallbackAllowed;
|
||||||
/**
|
/**
|
||||||
* Create a HBaseSaslRpcClient for an authentication method
|
* Create a HBaseSaslRpcClient for an authentication method
|
||||||
*
|
*
|
||||||
* @param method
|
* @param method
|
||||||
* the requested authentication method
|
* the requested authentication method
|
||||||
* @param token
|
* @param token
|
||||||
|
@ -75,11 +75,11 @@ public class HBaseSaslRpcClient {
|
||||||
public HBaseSaslRpcClient(AuthMethod method,
|
public HBaseSaslRpcClient(AuthMethod method,
|
||||||
Token<? extends TokenIdentifier> token, String serverPrincipal, boolean fallbackAllowed)
|
Token<? extends TokenIdentifier> token, String serverPrincipal, boolean fallbackAllowed)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
this(method, token, serverPrincipal, fallbackAllowed, "authentication");
|
this(method, token, serverPrincipal, fallbackAllowed, "authentication");
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* Create a HBaseSaslRpcClient for an authentication method
|
* Create a HBaseSaslRpcClient for an authentication method
|
||||||
*
|
*
|
||||||
* @param method
|
* @param method
|
||||||
* the requested authentication method
|
* the requested authentication method
|
||||||
* @param token
|
* @param token
|
||||||
|
@ -134,8 +134,8 @@ public class HBaseSaslRpcClient {
|
||||||
throw new IOException("Unable to find SASL client implementation");
|
throw new IOException("Unable to find SASL client implementation");
|
||||||
}
|
}
|
||||||
|
|
||||||
protected SaslClient createDigestSaslClient(String[] mechanismNames,
|
protected SaslClient createDigestSaslClient(String[] mechanismNames,
|
||||||
String saslDefaultRealm, CallbackHandler saslClientCallbackHandler)
|
String saslDefaultRealm, CallbackHandler saslClientCallbackHandler)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
return Sasl.createSaslClient(mechanismNames, null, null, saslDefaultRealm,
|
return Sasl.createSaslClient(mechanismNames, null, null, saslDefaultRealm,
|
||||||
SaslUtil.SASL_PROPS, saslClientCallbackHandler);
|
SaslUtil.SASL_PROPS, saslClientCallbackHandler);
|
||||||
|
@ -143,7 +143,7 @@ public class HBaseSaslRpcClient {
|
||||||
|
|
||||||
protected SaslClient createKerberosSaslClient(String[] mechanismNames,
|
protected SaslClient createKerberosSaslClient(String[] mechanismNames,
|
||||||
String userFirstPart, String userSecondPart) throws IOException {
|
String userFirstPart, String userSecondPart) throws IOException {
|
||||||
return Sasl.createSaslClient(mechanismNames, null, userFirstPart,
|
return Sasl.createSaslClient(mechanismNames, null, userFirstPart,
|
||||||
userSecondPart, SaslUtil.SASL_PROPS, null);
|
userSecondPart, SaslUtil.SASL_PROPS, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -154,16 +154,16 @@ public class HBaseSaslRpcClient {
|
||||||
WritableUtils.readString(inStream));
|
WritableUtils.readString(inStream));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Do client side SASL authentication with server via the given InputStream
|
* Do client side SASL authentication with server via the given InputStream
|
||||||
* and OutputStream
|
* and OutputStream
|
||||||
*
|
*
|
||||||
* @param inS
|
* @param inS
|
||||||
* InputStream to use
|
* InputStream to use
|
||||||
* @param outS
|
* @param outS
|
||||||
* OutputStream to use
|
* OutputStream to use
|
||||||
* @return true if connection is set up, or false if needs to switch
|
* @return true if connection is set up, or false if needs to switch
|
||||||
* to simple Auth.
|
* to simple Auth.
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
|
@ -243,7 +243,7 @@ public class HBaseSaslRpcClient {
|
||||||
/**
|
/**
|
||||||
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has
|
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has
|
||||||
* been called.
|
* been called.
|
||||||
*
|
*
|
||||||
* @param in
|
* @param in
|
||||||
* the InputStream to wrap
|
* the InputStream to wrap
|
||||||
* @return a SASL wrapped InputStream
|
* @return a SASL wrapped InputStream
|
||||||
|
@ -259,7 +259,7 @@ public class HBaseSaslRpcClient {
|
||||||
/**
|
/**
|
||||||
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has
|
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has
|
||||||
* been called.
|
* been called.
|
||||||
*
|
*
|
||||||
* @param out
|
* @param out
|
||||||
* the OutputStream to wrap
|
* the OutputStream to wrap
|
||||||
* @return a SASL wrapped OutputStream
|
* @return a SASL wrapped OutputStream
|
||||||
|
@ -287,6 +287,7 @@ public class HBaseSaslRpcClient {
|
||||||
this.userPassword = SaslUtil.encodePassword(token.getPassword());
|
this.userPassword = SaslUtil.encodePassword(token.getPassword());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
public void handle(Callback[] callbacks)
|
public void handle(Callback[] callbacks)
|
||||||
throws UnsupportedCallbackException {
|
throws UnsupportedCallbackException {
|
||||||
NameCallback nc = null;
|
NameCallback nc = null;
|
||||||
|
|
|
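The constructor and the saslConnect()/getInputStream()/getOutputStream() signatures visible in the hunks above are enough to sketch how a caller wires SASL onto an existing connection. This is an illustrative sketch, not code from the patch; the import paths for AuthMethod and the token types are assumed from the surrounding module.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hbase.security.AuthMethod;
import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class SaslConnectSketch {
  /** Negotiate SASL on an already-connected socket's streams. */
  static boolean setupSasl(AuthMethod method, Token<? extends TokenIdentifier> token,
      String serverPrincipal, boolean fallbackAllowed,
      InputStream socketIn, OutputStream socketOut) throws IOException {
    HBaseSaslRpcClient saslClient =
        new HBaseSaslRpcClient(method, token, serverPrincipal, fallbackAllowed);
    // Per the javadoc above: false means the server asked us to switch to simple auth.
    boolean established = saslClient.saslConnect(socketIn, socketOut);
    if (established) {
      // Wrap the raw streams; all further RPC traffic goes through these.
      InputStream in = saslClient.getInputStream(socketIn);
      OutputStream out = saslClient.getOutputStream(socketOut);
      // ... hand `in` and `out` to the RPC layer ...
    }
    return established;
  }
}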
@@ -43,6 +43,9 @@ import com.google.common.collect.Maps;
 @InterfaceStability.Evolving
 public class Permission extends VersionedWritable {
   protected static final byte VERSION = 0;
 
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public enum Action {
     READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
 

@@ -148,6 +151,7 @@ public class Permission extends VersionedWritable {
     return result;
   }
 
+  @Override
   public String toString() {
     StringBuilder str = new StringBuilder("[Permission: ")
         .append("actions=");

@@ -167,6 +171,7 @@ public class Permission extends VersionedWritable {
   }
 
   /** @return the object version number */
+  @Override
   public byte getVersion() {
     return VERSION;
   }

@@ -354,6 +354,7 @@ public class TablePermission extends Permission {
     return result;
   }
 
+  @Override
   public String toString() {
     StringBuilder str = new StringBuilder("[TablePermission: ");
     if(namespace != null) {

@@ -183,6 +183,7 @@ public class UserPermission extends TablePermission {
     return result;
   }
 
+  @Override
   public String toString() {
     StringBuilder str = new StringBuilder("UserPermission: ")
         .append("user=").append(Bytes.toString(user))

@@ -42,7 +42,7 @@ public class AuthenticationTokenIdentifier extends TokenIdentifier {
   protected long issueDate;
   protected long expirationDate;
   protected long sequenceNumber;
 
   public AuthenticationTokenIdentifier() {
   }
 

@@ -20,11 +20,13 @@ package org.apache.hadoop.hbase.security.visibility;
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /*
  * This exception indicates that VisibilityController hasn't finished initialization.
  */
 @InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class VisibilityControllerNotReadyException extends IOException {
 
   private static final long serialVersionUID = 1725986525207989173L;

@@ -18,11 +18,13 @@
 package org.apache.hadoop.hbase.snapshot;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Thrown when a snapshot could not be exported due to an error during the operation.
  */
 @InterfaceAudience.Public
+@InterfaceStability.Stable
 @SuppressWarnings("serial")
 public class ExportSnapshotException extends HBaseSnapshotException {
 
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Modifier;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.ClassFinder.And;
+import org.apache.hadoop.hbase.ClassFinder.FileNameFilter;
+import org.apache.hadoop.hbase.ClassFinder.Not;
+import org.apache.hadoop.hbase.ClassTestFinder.TestClassFilter;
+import org.apache.hadoop.hbase.ClassTestFinder.TestFileNameFilter;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test cases for ensuring our client visible classes have annotations
+ * for {@link InterfaceAudience}.
+ *
+ * All classes in hbase-client and hbase-common module MUST have InterfaceAudience
+ * annotations. All InterfaceAudience.Public annotated classes MUST also have InterfaceStability
+ * annotations. Think twice about marking an interface InterfaceAudience.Public. Make sure that
+ * it is an interface, not a class (for most cases), and clients will actually depend on it. Once
+ * something is marked with Public, we cannot change the signatures within the major release. NOT
+ * everything in the hbase-client module or every java public class has to be marked with
+ * InterfaceAudience.Public. ONLY the ones that an hbase application will directly use (Table, Get,
+ * etc, versus ProtobufUtil).
+ *
+ * Also note that HBase has it's own annotations in hbase-annotations module with the same names
+ * as in Hadoop. You should use the HBase's classes.
+ *
+ * See https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
+ * and https://issues.apache.org/jira/browse/HBASE-10462.
+ */
+@Category(SmallTests.class)
+public class TestInterfaceAudienceAnnotations {
+
+  private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class);
+
+  /** Selects classes with generated in their package name */
+  class GeneratedClassFilter implements ClassFinder.ClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      return c.getPackage().getName().contains("generated");
+    }
+  }
+
+  /** Selects classes with one of the {@link InterfaceAudience} annotation in their class
+   * declaration.
+   */
+  class InterfaceAudienceAnnotatedClassFilter implements ClassFinder.ClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      if (getAnnotation(c) != null) {
+        // class itself has a declared annotation.
+        return true;
+      }
+
+      // If this is an internal class, look for the encapsulating class to see whether it has
+      // annotation. All inner classes of private classes are considered annotated.
+      return isAnnotatedPrivate(c.getEnclosingClass());
+    }
+
+    private boolean isAnnotatedPrivate(Class<?> c) {
+      if (c == null) {
+        return false;
+      }
+
+      Class<?> ann = getAnnotation(c);
+      if (ann != null &&
+        !InterfaceAudience.Public.class.equals(ann)) {
+        return true;
+      }
+
+      return isAnnotatedPrivate(c.getEnclosingClass());
+    }
+
+    protected Class<?> getAnnotation(Class<?> c) {
+      // we should get only declared annotations, not inherited ones
+      Annotation[] anns = c.getDeclaredAnnotations();
+
+      for (Annotation ann : anns) {
+        // Hadoop clearly got it wrong for not making the annotation values (private, public, ..)
+        // an enum instead we have three independent annotations!
+        Class<?> type = ann.annotationType();
+        if (isInterfaceAudienceClass(type)) {
+          return type;
+        }
+      }
+      return null;
+    }
+  }
+
+  /** Selects classes with one of the {@link InterfaceStability} annotation in their class
+   * declaration.
+   */
+  class InterfaceStabilityAnnotatedClassFilter implements ClassFinder.ClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      if (getAnnotation(c) != null) {
+        // class itself has a declared annotation.
+        return true;
+      }
+      return false;
+    }
+
+    protected Class<?> getAnnotation(Class<?> c) {
+      // we should get only declared annotations, not inherited ones
+      Annotation[] anns = c.getDeclaredAnnotations();
+
+      for (Annotation ann : anns) {
+        // Hadoop clearly got it wrong for not making the annotation values (private, public, ..)
+        // an enum instead we have three independent annotations!
+        Class<?> type = ann.annotationType();
+        if (isInterfaceStabilityClass(type)) {
+          return type;
+        }
+      }
+      return null;
+    }
+  }
+
+  /** Selects classes with one of the {@link InterfaceAudience.Public} annotation in their
+   * class declaration.
+   */
+  class InterfaceAudiencePublicAnnotatedClassFilter extends InterfaceAudienceAnnotatedClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      return (InterfaceAudience.Public.class.equals(getAnnotation(c)));
+    }
+  }
+
+  /**
+   * Selects InterfaceAudience or InterfaceStability classes. Don't go meta!!!
+   */
+  class IsInterfaceStabilityClassFilter implements ClassFinder.ClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      return
+          isInterfaceAudienceClass(c) ||
+          isInterfaceStabilityClass(c);
+    }
+  }
+
+  private boolean isInterfaceAudienceClass(Class<?> c) {
+    return
+        c.equals(InterfaceAudience.Public.class) ||
+        c.equals(InterfaceAudience.Private.class) ||
+        c.equals(InterfaceAudience.LimitedPrivate.class);
+  }
+
+  private boolean isInterfaceStabilityClass(Class<?> c) {
+    return
+        c.equals(InterfaceStability.Stable.class) ||
+        c.equals(InterfaceStability.Unstable.class) ||
+        c.equals(InterfaceStability.Evolving.class);
+  }
+
+  /** Selects classes that are declared public */
+  class PublicClassFilter implements ClassFinder.ClassFilter {
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      int mod = c.getModifiers();
+      return Modifier.isPublic(mod);
+    }
+  }
+
+  /** Selects paths (jars and class dirs) only from the main code, not test classes */
+  class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter {
+    @Override
+    public boolean isCandidatePath(String resourcePath, boolean isJar) {
+      return !resourcePath.contains("test-classes") &&
+          !resourcePath.contains("tests.jar");
+    }
+  }
+
+  /**
+   * Checks whether all the classes in client and common modules contain
+   * {@link InterfaceAudience} annotations.
+   */
+  @Test
+  public void testInterfaceAudienceAnnotation()
+      throws ClassNotFoundException, IOException, LinkageError {
+
+    // find classes that are:
+    // In the main jar
+    // AND are public
+    // NOT test classes
+    // AND NOT generated classes
+    // AND are NOT annotated with InterfaceAudience
+    ClassFinder classFinder = new ClassFinder(
+      new MainCodeResourcePathFilter(),
+      new Not((FileNameFilter)new TestFileNameFilter()),
+      new And(new PublicClassFilter(),
+              new Not(new TestClassFilter()),
+              new Not(new GeneratedClassFilter()),
+              new Not(new IsInterfaceStabilityClassFilter()),
+              new Not(new InterfaceAudienceAnnotatedClassFilter()))
+    );
+
+    Set<Class<?>> classes = classFinder.findClasses(false);
+
+    LOG.info("These are the classes that DO NOT have @InterfaceAudience annotation:");
+    for (Class<?> clazz : classes) {
+      LOG.info(clazz);
+    }
+
+    Assert.assertEquals("All classes should have @InterfaceAudience annotation",
+      0, classes.size());
+  }
+
+  /**
+   * Checks whether all the classes in client and common modules that are marked
+   * InterfaceAudience.Public also have {@link InterfaceStability} annotations.
+   */
+  @Test
+  public void testInterfaceStabilityAnnotation()
+      throws ClassNotFoundException, IOException, LinkageError {
+
+    // find classes that are:
+    // In the main jar
+    // AND are public
+    // NOT test classes
+    // AND NOT generated classes
+    // AND are annotated with InterfaceAudience.Public
+    // AND NOT annotated with InterfaceStability
+    ClassFinder classFinder = new ClassFinder(
+      new MainCodeResourcePathFilter(),
+      new Not((FileNameFilter)new TestFileNameFilter()),
+      new And(new PublicClassFilter(),
+              new Not(new TestClassFilter()),
+              new Not(new GeneratedClassFilter()),
+              new InterfaceAudiencePublicAnnotatedClassFilter(),
+              new Not(new IsInterfaceStabilityClassFilter()),
+              new Not(new InterfaceStabilityAnnotatedClassFilter()))
+    );
+
+    Set<Class<?>> classes = classFinder.findClasses(false);
+
+    LOG.info("These are the classes that DO NOT have @InterfaceStability annotation:");
+    for (Class<?> clazz : classes) {
+      LOG.info(clazz);
+    }
+
+    Assert.assertEquals("All classes that are marked with @InterfaceAudience.Public should "
+        + "have @InterfaceStability annotation as well",
+      0, classes.size());
+  }
+}
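To make the policy in the javadoc above concrete: a class passes both tests either by carrying @InterfaceAudience.Private (no stability annotation needed) or by carrying @InterfaceAudience.Public plus an @InterfaceStability annotation. A minimal sketch with a made-up class name:

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

// Client-facing, so it must declare an audience; being Public, it must
// also declare stability or testInterfaceStabilityAnnotation() fails.
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExampleClientFacingClass {
  // Inner classes of Private classes count as annotated, per
  // InterfaceAudienceAnnotatedClassFilter.isAnnotatedPrivate().
}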
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 

@@ -26,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
  * only sets the configuration through the {@link #setConf(Configuration)}
  * method
  */
+@InterfaceAudience.Private
 public class BaseConfigurable implements Configurable {
 
   private Configuration conf;

@@ -84,6 +84,7 @@ public final class HConstants {
   /**
    * Status codes used for return values of bulk operations.
    */
+  @InterfaceAudience.Private
   public enum OperationStatusCode {
     NOT_RUN,
     SUCCESS,

@@ -570,6 +571,7 @@ public final class HConstants {
   public static final String REGION_IMPL = "hbase.hregion.impl";
 
   /** modifyTable op for replacing the table descriptor */
+  @InterfaceAudience.Private
   public static enum Modify {
     CLOSE_REGION,
     TABLE_COMPACT,

@@ -25,6 +25,8 @@ import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * The field or the parameter to which this annotation can be applied only when it
  * holds mutations for hbase:meta table.

@@ -32,6 +34,7 @@ import java.lang.annotation.Target;
 @Documented
 @Target( { ElementType.LOCAL_VARIABLE, ElementType.PARAMETER })
 @Retention(RetentionPolicy.CLASS)
+@InterfaceAudience.Private
 public @interface MetaMutationAnnotation {
 
 }

@@ -161,6 +161,8 @@ public class NamespaceDescriptor {
     return new Builder(ns);
   }
 
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public static class Builder {
     private String bName;
     private Map<String, String> bConfiguration = new TreeMap<String, String>();

@@ -173,7 +175,7 @@ public class NamespaceDescriptor {
     private Builder(String name) {
       this.bName = name;
     }
 
     public Builder addConfiguration(Map<String, String> configuration) {
       this.bConfiguration.putAll(configuration);
       return this;

@@ -193,7 +195,7 @@ public class NamespaceDescriptor {
     if (this.bName == null){
       throw new IllegalArgumentException("A name has to be specified in a namespace.");
     }
 
     NamespaceDescriptor desc = new NamespaceDescriptor(this.bName);
     desc.configuration = this.bConfiguration;
     return desc;
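For context, the Builder made Public above is normally obtained through NamespaceDescriptor's static create(String) factory (the `return new Builder(ns);` line comes from one of those factories, which is not shown in this hunk). A minimal usage sketch under that assumption:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.NamespaceDescriptor;

public class NamespaceBuilderSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<String, String>();
    props.put("key", "value"); // arbitrary namespace-level configuration

    // create(String) hands back the now-Public Builder; build() rejects
    // a null name, per the hunk above.
    NamespaceDescriptor desc = NamespaceDescriptor.create("example_ns")
        .addConfiguration(props)
        .build();
    System.out.println(desc);
  }
}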
@@ -228,6 +228,8 @@ implements WritableComparable<ImmutableBytesWritable> {
 
   /** A Comparator optimized for ImmutableBytesWritable.
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public static class Comparator extends WritableComparator {
     private BytesWritable.Comparator comparator =
       new BytesWritable.Comparator();

@@ -26,11 +26,14 @@ import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * Copied from guava source code v15 (LimitedInputStream)
  * Guava deprecated LimitInputStream in v14 and removed it in v15. Copying this class here
  * allows to be compatible with guava 11 to 15+.
  */
+@InterfaceAudience.Private
 public final class LimitInputStream extends FilterInputStream {
   private long left;
   private long mark = -1;

@@ -56,6 +56,8 @@ public final class Encryption {
   /**
    * Crypto context
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
   public static class Context extends org.apache.hadoop.hbase.io.crypto.Context {
 
     /** The null crypto context */

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.io.hadoopbackport;
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * The ThrottleInputStream provides bandwidth throttling on a specified
  * InputStream. It is implemented as a wrapper on top of another InputStream

@@ -31,6 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short interval,
  * the average tends towards the specified maximum, overall.)
  */
+@InterfaceAudience.Private
 public class ThrottledInputStream extends InputStream {
 
   private final InputStream rawStream;

@@ -47,7 +50,7 @@ public class ThrottledInputStream extends InputStream {
   }
 
   public ThrottledInputStream(InputStream rawStream, long maxBytesPerSec) {
     assert maxBytesPerSec > 0 : "Bandwidth " + maxBytesPerSec + " is invalid";
     this.rawStream = rawStream;
     this.maxBytesPerSec = maxBytesPerSec;
   }
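Although ThrottledInputStream stays @InterfaceAudience.Private, the constructor shown above is enough for a usage sketch of the throttling behavior its javadoc describes (the file path is made up):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;

public class ThrottleSketch {
  public static void main(String[] args) throws IOException {
    // Cap reads at ~1 MiB/s; the wrapper delays reads as needed so the
    // average rate tends toward this maximum, per the javadoc above.
    InputStream in = new ThrottledInputStream(
        new FileInputStream("/tmp/example.dat"), 1024 * 1024);
    byte[] buf = new byte[8192];
    long total = 0;
    for (int n; (n = in.read(buf)) != -1; ) {
      total += n;
    }
    in.close();
    System.out.println("read " + total + " bytes");
  }
}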
@@ -28,7 +28,6 @@ import org.apache.hadoop.util.ReflectionUtils;
 /**
  * Provide an instance of a user. Allows custom {@link User} creation.
  */
-
 @InterfaceAudience.Private
 public class UserProvider extends BaseConfigurable {
 

@@ -19,8 +19,10 @@
 package org.apache.hadoop.hbase.trace;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.htrace.HTraceConfiguration;
 
+@InterfaceAudience.Private
 public class HBaseHTraceConfiguration extends HTraceConfiguration {
 
   public static final String KEY_PREFIX = "hbase.";

@@ -24,6 +24,7 @@ import java.util.HashSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.htrace.SpanReceiver;
 import org.htrace.Trace;
 

@@ -32,6 +33,7 @@ import org.htrace.Trace;
  * hbase-site.xml, adding those SpanReceivers to the Tracer, and closing those
  * SpanReceivers when appropriate.
  */
+@InterfaceAudience.Private
 public class SpanReceiverHost {
   public static final String SPAN_RECEIVERS_CONF_KEY = "hbase.trace.spanreceiver.classes";
   private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.types;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Order;
 import org.apache.hadoop.hbase.util.PositionedByteRange;
 

@@ -28,6 +30,8 @@ import com.google.protobuf.Message;
  * A base-class for {@link DataType} implementations backed by protobuf. See
  * {@code PBKeyValue} in {@code hbase-examples} module.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public abstract class PBType<T extends Message> implements DataType<T> {
   @Override
   public boolean isOrderPreserving() {

@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * An abstract implementation of the ByteRange API
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public abstract class AbstractByteRange implements ByteRange {
 
   public static final int UNSET_HASH_VALUE = -1;

@@ -19,6 +19,9 @@ package org.apache.hadoop.hbase.util;
 
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 import com.google.common.annotations.VisibleForTesting;
 
 /**

@@ -28,6 +31,8 @@ import com.google.common.annotations.VisibleForTesting;
  * {@link #compareTo(ByteRange)}, {@link #hashCode()}, or
  * {@link #equals(Object)}. {@code Position} is retained by copy operations.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
     PositionedByteRange {
   /**

@@ -74,7 +79,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
   /**
    * Update the beginning of this range. {@code offset + length} may not be
    * greater than {@code bytes.length}. Resets {@code position} to 0.
    *
    * @param offset
    *          the new start of this range.
    * @return this.

@@ -90,7 +95,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
    * Update the length of this range. {@code offset + length} should not be
    * greater than {@code bytes.length}. If {@code position} is greater than the
    * new {@code length}, sets {@code position} to {@code length}.
    *
    * @param length
    *          The new length of this range.
    * @return this.

@@ -153,28 +158,28 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
 
   @Override
   public abstract PositionedByteRange put(byte[] val, int offset, int length);
 
   @Override
   public abstract PositionedByteRange putInt(int index, int val);
 
   @Override
   public abstract PositionedByteRange putLong(int index, long val);
 
   @Override
   public abstract PositionedByteRange putShort(int index, short val);
 
   @Override
   public abstract PositionedByteRange putInt(int val);
 
   @Override
   public abstract PositionedByteRange putLong(long val);
 
   @Override
   public abstract PositionedByteRange putShort(short val);
 
   @Override
   public abstract int putVLong(int index, long val);
 
   @Override
   public abstract int putVLong(long val);
   /**

@@ -1456,6 +1456,8 @@ public class Base64 {
    * @see Base64
    * @since 1.3
    */
+  @InterfaceAudience.Public
+  @InterfaceStability.Stable
   public static class Base64OutputStream extends FilterOutputStream {
     private boolean encode;
     private int position;

@@ -30,12 +30,15 @@ import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * A completion service, close to the one available in the JDK 1.7
  * However, this ones keeps the list of the future, and allows to cancel them all.
  * This means as well that it can be used for a small set of tasks only.
  * <br>Implementation is not Thread safe.
  */
+@InterfaceAudience.Private
 public class BoundedCompletionService<V> {
   private final Executor executor;
   private final List<Future<V>> tasks; // alls the tasks

@@ -23,12 +23,15 @@ import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.util.zip.Checksum;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * Utility class that is used to generate a Checksum object.
  * The Checksum implementation is pluggable and an application
  * can specify their own class that implements their own
  * Checksum algorithm.
  */
+@InterfaceAudience.Private
 public class ChecksumFactory {
 
   static private final Class<?>[] EMPTY_ARRAY = new Class[]{};

@@ -51,7 +54,7 @@ public class ChecksumFactory {
    * @param className classname for which an constructor is created
    * @return a new Constructor object
    */
   static public Constructor<?> newConstructor(String className)
     throws IOException {
     try {
       Class<?> clazz = getClassByName(className);

@@ -88,7 +91,7 @@ public class ChecksumFactory {
    * @return the class object.
    * @throws ClassNotFoundException if the class is not found.
    */
   static private Class<?> getClassByName(String name)
     throws ClassNotFoundException {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     return Class.forName(name, true, classLoader);
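A small sketch of the pluggable-checksum mechanism described above, using only the newConstructor(String) signature visible in the hunk; java.util.zip.CRC32 is the JDK default also named in the ChecksumType hunk that follows:

import java.lang.reflect.Constructor;
import java.util.zip.Checksum;

import org.apache.hadoop.hbase.util.ChecksumFactory;

public class ChecksumSketch {
  public static void main(String[] args) throws Exception {
    // newConstructor() reflectively loads the named class via the context
    // ClassLoader (see getClassByName above); any Checksum implementation
    // with a no-arg constructor can be plugged in the same way.
    Constructor<?> ctor = ChecksumFactory.newConstructor("java.util.zip.CRC32");
    Checksum sum = (Checksum) ctor.newInstance();
    byte[] data = "hello".getBytes("UTF-8");
    sum.update(data, 0, data.length);
    System.out.println("crc32=" + sum.getValue());
  }
}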
@@ -24,13 +24,15 @@ import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
  * Checksum types. The Checksum type is a one byte number
  * that stores a representation of the checksum algorithm
  * used to encode a hfile. The ordinal of these cannot
  * change or else you risk breaking all existing HFiles out there.
  */
+@InterfaceAudience.Private
 public enum ChecksumType {
 
   NULL((byte)0) {

@@ -70,7 +72,7 @@ public enum ChecksumType {
       LOG.trace(PURECRC32 + " not available.");
     }
     try {
       // The default checksum class name is java.util.zip.CRC32.
       // This is available on all JVMs.
       if (ctor == null) {
         ctor = ChecksumFactory.newConstructor(JDKCRC);

@@ -24,12 +24,15 @@ import java.util.Collection;
 import java.util.List;
 import java.util.NoSuchElementException;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * A collection class that contains multiple sub-lists, which allows us to not copy lists.
  * This class does not support modification. The derived classes that add modifications are
  * not thread-safe.
  * NOTE: Doesn't implement list as it is not necessary for current usage, feel free to add.
  */
+@InterfaceAudience.Private
 public class ConcatenatedLists<T> implements Collection<T> {
   protected final ArrayList<List<T>> components = new ArrayList<List<T>>();
   protected int size = 0;

@@ -22,6 +22,8 @@ import java.io.InterruptedIOException;
 import java.net.SocketTimeoutException;
 import java.nio.channels.ClosedByInterruptException;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
 /**
  * This class handles the different interruption classes.
  * It can be:

@@ -31,6 +33,7 @@ import java.nio.channels.ClosedByInterruptException;
  * - SocketTimeoutException inherits InterruptedIOException but is not a real
  *   interruption, so we have to distinguish the case. This pattern is unfortunately common.
  */
+@InterfaceAudience.Private
 public class ExceptionUtil {
 
   /**

@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * This is a very fast, non-cryptographic hash suitable for general hash-based
  * lookup. See http://code.google.com/p/smhasher/wiki/MurmurHash3 for details.

@@ -25,6 +28,8 @@ package org.apache.hadoop.hbase.util;
  * <p>MurmurHash3 is the successor to MurmurHash2. It comes in 3 variants, and
  * the 32-bit version targets low latency for hash table use.</p>
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class MurmurHash3 extends Hash {
   private static MurmurHash3 _instance = new MurmurHash3();
 

@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.util;
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
+@InterfaceAudience.Private
 public class PrettyPrinter {
 
   public enum Unit {

@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * Exception thrown when a read only byte range is modified
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class ReadOnlyByteRangeException extends UnsupportedOperationException {
   public ReadOnlyByteRangeException() {
 

@@ -17,13 +17,18 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
 /**
  * A read only version of the {@link ByteRange}.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class SimpleByteRange extends AbstractByteRange {
   public SimpleByteRange() {
   }
 
   public SimpleByteRange(int capacity) {
     this(new byte[capacity]);
   }

@@ -110,7 +115,7 @@ public class SimpleByteRange extends AbstractByteRange {
     }
     return clone;
   }
 
   @Override
   public ByteRange shallowCopySubRange(int innerOffset, int copyLength) {
     SimpleByteRange clone = new SimpleByteRange(bytes, offset + innerOffset,

@@ -120,7 +125,7 @@ public class SimpleByteRange extends AbstractByteRange {
     }
     return clone;
   }
 
   @Override
   public boolean equals(Object thatObject) {
     if (thatObject == null){

@@ -62,6 +62,43 @@ public class ClassFinder {
     boolean isCandidateClass(Class<?> c);
   };
 
+  public static class Not implements ResourcePathFilter, FileNameFilter, ClassFilter {
+    private ResourcePathFilter resourcePathFilter;
+    private FileNameFilter fileNameFilter;
+    private ClassFilter classFilter;
+
+    public Not(ResourcePathFilter resourcePathFilter){this.resourcePathFilter = resourcePathFilter;}
+    public Not(FileNameFilter fileNameFilter){this.fileNameFilter = fileNameFilter;}
+    public Not(ClassFilter classFilter){this.classFilter = classFilter;}
+
+    @Override
+    public boolean isCandidatePath(String resourcePath, boolean isJar) {
+      return !resourcePathFilter.isCandidatePath(resourcePath, isJar);
+    }
+    @Override
+    public boolean isCandidateFile(String fileName, String absFilePath) {
+      return !fileNameFilter.isCandidateFile(fileName, absFilePath);
+    }
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      return !classFilter.isCandidateClass(c);
+    }
+  }
+
+  public static class And implements ClassFilter {
+    ClassFilter[] classFilters;
+    public And(ClassFilter...classFilters) { this.classFilters = classFilters; }
+    @Override
+    public boolean isCandidateClass(Class<?> c) {
+      for (ClassFilter filter : classFilters) {
+        if (!filter.isCandidateClass(c)) {
+          return false;
+        }
+      }
+      return true;
+    }
+  }
+
 public ClassFinder() {
   this(null, null, null);
 }
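The Not and And combinators added above compose as plain predicate logic; a small sketch mirroring how TestInterfaceAudienceAnnotations uses them (the two concrete filters are throwaway anonymous classes, since ClassFilter here predates lambdas):

import java.lang.reflect.Modifier;

import org.apache.hadoop.hbase.ClassFinder.And;
import org.apache.hadoop.hbase.ClassFinder.ClassFilter;
import org.apache.hadoop.hbase.ClassFinder.Not;

public class FilterComboSketch {
  public static void main(String[] args) {
    ClassFilter isGenerated = new ClassFilter() {
      @Override
      public boolean isCandidateClass(Class<?> c) {
        return c.getPackage().getName().contains("generated");
      }
    };
    ClassFilter isPublic = new ClassFilter() {
      @Override
      public boolean isCandidateClass(Class<?> c) {
        return Modifier.isPublic(c.getModifiers());
      }
    };
    // Accept only public, non-"generated" classes: And short-circuits on
    // the first rejecting filter, Not simply inverts its delegate.
    ClassFilter combined = new And(isPublic, new Not(isGenerated));
    System.out.println(combined.isCandidateClass(String.class)); // true
  }
}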
@@ -103,6 +103,10 @@
 
   <dependencies>
     <!-- Intra-project dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-annotations</artifactId>
+    </dependency>
     <!-- General dependencies -->
     <dependency>
       <groupId>com.google.protobuf</groupId>

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.HBaseZeroCopyByteString;

@@ -26,6 +27,7 @@ import com.google.protobuf.HBaseZeroCopyByteString;
 /**
  * Hack to workaround HBASE-1304 issue that keeps bubbling up when a mapreduce context.
  */
+@InterfaceAudience.Private
 public class ByteStringer {
   private static final Log LOG = LogFactory.getLog(ByteStringer.class);
 

@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.zip.Checksum;
 
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.util.ChecksumType;
 /**
  * Utility methods to compute and validate checksums.
  */
+@InterfaceAudience.Private
 public class ChecksumUtil {
 
   /** This is used to reserve space in a byte buffer */