HBASE-4403 Adopt interface stability/audience classifications from Hadoop AND HBASE-5502 region_mover.rb fails to load regions back to original server for regions only containing empty tables

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1295710 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2012-03-01 17:53:03 +00:00
parent 2b5b8bb4f8
commit dace419238
579 changed files with 1726 additions and 5 deletions
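
Aside from the region_mover.rb fix, the body of this commit is mechanical: each touched class gains the Hadoop classification imports plus annotations declaring who may depend on the class (@InterfaceAudience.Public or .Private) and, for public classes, how much its API may still change (@InterfaceStability.Stable or .Evolving). A minimal sketch of the pattern as applied throughout the diff below, with a hypothetical class name:

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Hypothetical example: a client-facing class whose API may still change
 * between minor releases. Internal classes in this commit instead get
 * annotated @InterfaceAudience.Private and carry no stability annotation.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExampleClientApi {
}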

@@ -364,7 +364,8 @@ def loadRegions(options, hostname)
for r in regions
exists = false
begin
- exists = isSuccessfulScan(admin, r)
+ isSuccessfulScan(admin, r)
+ exists = true
rescue org.apache.hadoop.hbase.NotServingRegionException => e
$LOG.info("Failed scan of " + e.message)
end
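
This hunk is the whole of the HBASE-5502 fix: the old code assigned the scan's return value to exists, so a region whose table is empty (the scan succeeds but finds nothing) read as a failed scan and was never loaded back to its original server. The new code treats "the scan returned without raising" as success. A self-contained Java sketch of the same pattern, all names hypothetical:

/** Hypothetical stand-alone illustration of the fix above. */
public class EmptyRegionScanSketch {
  static class NotServingRegionException extends Exception {
    NotServingRegionException(String msg) { super(msg); }
  }

  // Stand-in for isSuccessfulScan: raises when the region is unreachable,
  // but legitimately finds zero rows in an empty region.
  static int scanOneRow(String region) throws NotServingRegionException {
    return 0; // empty region: no rows, yet the scan itself succeeded
  }

  public static void main(String[] args) {
    boolean exists = false;
    try {
      scanOneRow("someRegion"); // success means "did not throw", not "found rows"
      exists = true;
    } catch (NotServingRegionException e) {
      System.out.println("Failed scan of " + e.getMessage());
    }
    System.out.println("exists = " + exists); // true even for an empty region
  }
}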

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Interface to support the aborting of a given server or client.
* <p>
@@ -27,6 +29,7 @@ package org.apache.hadoop.hbase;
* <p>
* Implemented by the Master, RegionServer, and TableServers (client).
*/
@InterfaceAudience.Private
public interface Abortable {
/**
* Abort the server or client.

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Sleeper;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.util.Sleeper;
* <p>Don't subclass Chore if the task relies on being woken up for something to
* do, such as an entry being added to a queue, etc.
*/
@InterfaceAudience.Private
public abstract class Chore extends HasThread {
private final Log LOG = LogFactory.getLog(this.getClass());
private final Sleeper sleeper;

@@ -21,11 +21,16 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown by the master when a region server clock skew is
* too high.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ClockOutOfSyncException extends IOException {
public ClockOutOfSyncException(String message) {
super(message);

@@ -31,6 +31,8 @@ import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.VersionMismatchException;
@@ -54,6 +56,8 @@ import org.apache.hadoop.io.VersionedWritable;
* <li>The unique cluster ID</li>
* </ul>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ClusterStatus extends VersionedWritable {
/**
* Version for object serialization. Incremented for changes in serialized

@@ -18,9 +18,14 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Coprocess interface.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Coprocessor {
static final int VERSION = 1;

@@ -18,12 +18,16 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTableInterface;
/**
* Coprocessor environment state.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CoprocessorEnvironment {
/** @return the Coprocessor interface version */

@@ -21,10 +21,15 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Subclass if exception is not meant to be retried: e.g.
* {@link UnknownScannerException}
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DoNotRetryIOException extends IOException {
private static final long serialVersionUID = 1197446454511704139L;

@@ -16,11 +16,16 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown during flush if the possibility snapshot content was not properly
* persisted into store files. Response should include replay of hlog content.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DroppedSnapshotException extends IOException {
private static final long serialVersionUID = -5463156580831677374L;

@@ -19,12 +19,14 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.WatchedEvent;
/**
* An empty ZooKeeper watcher
*/
@InterfaceAudience.Private
public class EmptyWatcher implements Watcher {
public static EmptyWatcher instance = new EmptyWatcher();
private EmptyWatcher() {}

@@ -23,12 +23,16 @@ import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.VersionInfo;
/**
* Adds HBase configuration files to a Configuration
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HBaseConfiguration extends Configuration {
private static final Log LOG = LogFactory.getLog(HBaseConfiguration.class);

@@ -26,6 +26,8 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.Compression;
@@ -45,6 +47,8 @@ import org.apache.hadoop.io.WritableComparable;
* column and recreating it. If there is data stored in the column, it will be
* deleted when the column is deleted.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HColumnDescriptor implements WritableComparable<HColumnDescriptor> {
// For future backward compatibility

@@ -25,12 +25,16 @@ import java.util.List;
import java.util.UUID;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
/**
* HConstants holds a bunch of HBase-related constants
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public final class HConstants {
/**
* Status codes used for return values of bulk operations.

@@ -27,10 +27,13 @@ import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Data structure to describe the distribution of HDFS blocks amount hosts
*/
@InterfaceAudience.Private
public class HDFSBlocksDistribution {
private Map<String,HostAndWeight> hostAndWeights = null;
private long uniqueBlocksTotalWeight = 0;

@@ -27,6 +27,8 @@ import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -44,6 +46,8 @@ import org.apache.hadoop.io.WritableComparable;
* Contains HRegion id, start and end keys, a reference to this
* HRegions' table descriptor, etc.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HRegionInfo extends VersionedWritable
implements WritableComparable<HRegionInfo> {
// VERSION == 0 when HRegionInfo had an HTableDescriptor inside it.

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Addressing;
/**
@@ -28,6 +30,8 @@ import org.apache.hadoop.hbase.util.Addressing;
* instances are the same if they refer to the same 'location' (the same
* hostname and port), though they may be carrying different regions.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HRegionLocation implements Comparable<HRegionLocation> {
private final HRegionInfo regionInfo;
private final String hostname;

@@ -29,6 +29,8 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.io.VersionedWritable;
@@ -38,6 +40,8 @@ import org.apache.hadoop.io.WritableUtils;
/**
* This class is used exporting current state of load on a RegionServer.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HServerLoad extends VersionedWritable
implements WritableComparable<HServerLoad> {
private static final byte VERSION = 2;

@@ -31,6 +31,8 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.regex.Matcher;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.Compression;
@@ -45,6 +47,8 @@ import org.apache.hadoop.io.WritableComparable;
* <code> .META. </code>, is the table is read only, the maximum size of the memstore,
* when the region split should occur, coprocessors associated with it etc...
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**

@@ -21,10 +21,15 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if a request is table schema modification is requested but
* made for an invalid family name.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class InvalidFamilyOperationException extends IOException {
private static final long serialVersionUID = 1L << 22 - 1L;
/** default constructor */

@@ -30,6 +30,8 @@ import java.util.Map;
import com.google.common.primitives.Longs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;
@@ -63,6 +65,8 @@ import org.apache.hadoop.io.Writable;
* be < <code>Integer.MAX_SIZE</code>.
* The column does not contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KeyValue implements Writable, HeapSize {
static final Log LOG = LogFactory.getLog(KeyValue.class);
// TODO: Group Key-only comparators and operations into a Key class, just

@@ -27,6 +27,8 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -55,6 +57,8 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
* instead of 60000.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class LocalHBaseCluster {
static final Log LOG = LogFactory.getLog(LocalHBaseCluster.class);
private final List<JVMClusterUtil.MasterThread> masterThreads =

@@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
* <p>
* You can get the current master via {@link #getMasterAddress()}
*/
@InterfaceAudience.Private
public class MasterAddressTracker extends ZooKeeperNodeTracker {
/**
* Construct a master address listener with the specified

@@ -21,9 +21,14 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if the master is not running
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MasterNotRunningException extends IOException {
private static final long serialVersionUID = 1L << 23 - 1L;
/** default constructor */

@@ -20,11 +20,15 @@
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;
/**
* Thrown when an operation requires the root and all meta regions to be online
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
private static final long serialVersionUID = 6439786157874827523L;
/**

@@ -21,12 +21,16 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Thrown by a region server if it is sent a request for a region it is not
* serving.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NotServingRegionException extends IOException {
private static final long serialVersionUID = 1L << 17 - 1L;

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown by the master when a region server was shut down and
@@ -28,8 +30,10 @@ import java.io.IOException;
* operations
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class PleaseHoldException extends IOException {
public PleaseHoldException(String message) {
super(message);
}
}

@@ -20,10 +20,15 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when something happens related to region handling.
* Subclasses have to be more specific.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RegionException extends IOException {
private static final long serialVersionUID = 1473510258071111371L;

@@ -23,12 +23,14 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;
/**
* An immutable class which contains a static method for handling
* org.apache.hadoop.ipc.RemoteException exceptions.
*/
@InterfaceAudience.Private
public class RemoteExceptionHandler {
/* Not instantiable */
private RemoteExceptionHandler() {super();}

@@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
* Defines the set of shared functions implemented by HBase servers (Masters
* and RegionServers).
*/
@InterfaceAudience.Private
public interface Server extends Abortable, Stoppable {
/**
* Gets the configuration object for this server.

@@ -22,6 +22,8 @@ package org.apache.hadoop.hbase;
import java.util.Collection;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@@ -43,6 +45,8 @@ import org.apache.hadoop.hbase.util.Bytes;
*
* <p>Immutable.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ServerName implements Comparable<ServerName> {
/**
* Version for this class.

@@ -19,9 +19,12 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Implementers are Stoppable.
*/
@InterfaceAudience.Private
public interface Stoppable {
/**
* Stop this service.

@@ -21,10 +21,15 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Get, remove and modify table descriptors.
* Used by servers to host descriptors.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TableDescriptors {
/**
* @param tablename

@@ -17,9 +17,14 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when a table exists but should not
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableExistsException extends IOException {
private static final long serialVersionUID = 1L << 7 - 1L;
/** default constructor */

@@ -21,11 +21,15 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Thrown if a table should be offline but is not
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableNotDisabledException extends IOException {
private static final long serialVersionUID = 1L << 19 - 1L;
/** default constructor */

@@ -21,11 +21,15 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Thrown if a table should be enabled but is not
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableNotEnabledException extends IOException {
private static final long serialVersionUID = 262144L;
/** default constructor */

@@ -19,7 +19,12 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Thrown when a table can not be located */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TableNotFoundException extends RegionException {
private static final long serialVersionUID = 993179627856392526L;

@@ -21,9 +21,14 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when we are asked to operate on a region we know nothing about.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UnknownRegionException extends IOException {
private static final long serialVersionUID = 1968858760475205392L;

@@ -19,10 +19,15 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if a region server is passed an unknown row lock id
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UnknownRowLockException extends DoNotRetryIOException {
private static final long serialVersionUID = 993179627856392526L;

@@ -19,6 +19,9 @@
*/
package org.apache.hadoop.hbase;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if a region server is passed an unknown scanner id.
@@ -26,6 +29,8 @@ package org.apache.hadoop.hbase;
* scanner lease on the serverside has expired OR the serverside is closing
* down and has cancelled all leases.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UnknownScannerException extends DoNotRetryIOException {
private static final long serialVersionUID = 993179627856392526L;

@@ -19,12 +19,15 @@ package org.apache.hadoop.hbase;
import java.lang.annotation.*;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* A package attribute that captures the version of hbase that was compiled.
* Copied down from hadoop. All is same except name of interface.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.PACKAGE)
@InterfaceAudience.Private
public @interface VersionAnnotation {
/**

@@ -21,12 +21,17 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown by the master when a region server reports and is
* already being processed as dead. This can happen when a region server loses
* its session but didn't figure it yet.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class YouAreDeadException extends IOException {
public YouAreDeadException(String message) {
super(message);

@@ -21,9 +21,14 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown if the client can't connect to zookeeper
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ZooKeeperConnectionException extends IOException {
private static final long serialVersionUID = 1L << 23 - 1L;
/** default constructor */

@@ -30,6 +30,7 @@ import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.avro.util.Utf8;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -59,6 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes;
/**
* Start an Avro server
*/
@InterfaceAudience.Private
public class AvroServer {
/**

@@ -27,6 +27,7 @@ import org.apache.avro.Schema;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
@@ -59,6 +60,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Private
public class AvroUtil {
//

@@ -28,6 +28,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -57,6 +58,7 @@ import org.apache.hadoop.ipc.RemoteException;
* <p>Call {@link #start()} to start up operation. Call {@link #stop()}} to
* interrupt waits and close up shop.
*/
@InterfaceAudience.Private
public class CatalogTracker {
// TODO: This class needs a rethink. The original intent was that it would be
// the one-stop-shop for root and meta locations and that it would get this

@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.util.Writables;
* TODO: Put MetaReader and MetaEditor together; doesn't make sense having
* them distinct.
*/
@InterfaceAudience.Private
public class MetaEditor {
// TODO: Strip CatalogTracker from this class. Its all over and in the end
// its only used to get its Configuration so we can get associated

@@ -24,6 +24,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;

@@ -27,6 +27,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -46,6 +47,7 @@ import org.apache.hadoop.ipc.RemoteException;
/**
* Reads region and assignment information from <code>.META.</code>.
*/
@InterfaceAudience.Private
public class MetaReader {
// TODO: Strip CatalogTracker from this class. Its all over and in the end
// its only used to get its Configuration so we can get associated

@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.catalog;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -30,6 +31,7 @@ import org.apache.zookeeper.KeeperException;
/**
* Makes changes to the location of <code>-ROOT-</code> in ZooKeeper.
*/
@InterfaceAudience.Private
public class RootLocationEditor {
private static final Log LOG = LogFactory.getLog(RootLocationEditor.class);

@@ -20,9 +20,14 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Helper class for custom client scanners.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractClientScanner implements ResultScanner {
@Override

@@ -23,6 +23,8 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
@@ -32,6 +34,8 @@ import org.apache.hadoop.io.Writable;
* {@link HTable::batch} to associate the action with it's region and maintain
* the index from the original request.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Action<R> implements Writable, Comparable {
private Row action;

@@ -25,6 +25,8 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
@@ -41,6 +43,8 @@ import org.apache.hadoop.io.Writable;
* row to append to. At least one column to append must be specified using the
* {@link #add(byte[], byte[], byte[])} method.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Append extends Mutation implements Row {
// TODO: refactor to derive from Put?
private static final String RETURN_RESULTS = "_rr_";

@@ -22,6 +22,11 @@ package org.apache.hadoop.hbase.client;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Attributes {
/**
* Sets an attribute.

@@ -23,6 +23,8 @@ import java.util.LinkedList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
@@ -40,6 +42,8 @@ import org.apache.hadoop.io.DataOutputBuffer;
* If there are multiple regions in a table, this scanner will iterate
* through them all.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ClientScanner extends AbstractClientScanner {
private final Log LOG = LogFactory.getLog(this.getClass());
private Scan scan;

@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
@@ -24,6 +26,8 @@ import org.apache.hadoop.hbase.HConstants;
* Utility used by client connections such as {@link HConnection} and
* {@link ServerCallable}
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ConnectionUtils {
/**
* Calculate pause time.

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
@@ -64,6 +66,8 @@ import java.util.Map;
* deleteFamily -- then you need to use the method overrides that take a
* timestamp. The constructor timestamp is not referenced.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Delete extends Mutation
implements Writable, Row, Comparable<Row> {
private static final byte DELETE_VERSION = (byte)3;

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.Filter;
@@ -63,6 +65,8 @@ import java.util.TreeSet;
* <p>
* To add a filter, execute {@link #setFilter(Filter) setFilter}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Get extends OperationWithAttributes
implements Writable, Row, Comparable<Row> {
private static final byte GET_VERSION = (byte)2;

@@ -32,6 +32,8 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ClusterStatus;
@@ -72,6 +74,8 @@ import org.apache.hadoop.util.StringUtils;
* <p>Currently HBaseAdmin instances are not expected to be long-lived. For
* example, an HBaseAdmin instance will not ride over a Master restart.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HBaseAdmin implements Abortable, Closeable {
private final Log LOG = LogFactory.getLog(this.getClass().getName());
// private final HConnection connection;

@@ -25,6 +25,8 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -57,6 +59,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
*
* @see HConnectionManager
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HConnection extends Abortable, Closeable {
/**
* @return Configuration instance being used by this HConnection instance.

@@ -47,6 +47,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -131,6 +133,8 @@ import org.apache.zookeeper.KeeperException;
* cleanup to the client.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HConnectionManager {
// An LRU Map of HConnectionKey -> HConnection (TableServer). All
// access must be synchronized. This map is not private because tests

@@ -39,6 +39,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
@@ -99,6 +101,8 @@ import org.apache.hadoop.hbase.util.Writables;
* @see HConnection
* @see HConnectionManager
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HTable implements HTableInterface {
private static final Log LOG = LogFactory.getLog(HTable.class);
private HConnection connection;

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
@@ -28,6 +30,8 @@ import java.io.IOException;
*
* @since 0.21.0
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HTableFactory implements HTableInterfaceFactory {
@Override
public HTableInterface createHTableInterface(Configuration config,

@@ -23,6 +23,8 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
@@ -36,6 +38,8 @@ import java.util.Map;
*
* @since 0.21.0
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterface extends Closeable {
/**

@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -29,6 +31,8 @@ import org.apache.hadoop.conf.Configuration;
*
* @since 0.21.0
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface HTableInterfaceFactory {
/**

@@ -25,6 +25,8 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -56,6 +58,8 @@ import org.apache.hadoop.hbase.util.PoolMap.PoolType;
* Pool will manage its own connections to the cluster. See
* {@link HConnectionManager}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HTablePool implements Closeable {
private final PoolMap<String, HTableInterface> tables;
private final int maxSize;

@@ -25,6 +25,9 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@@ -35,6 +38,8 @@ import org.apache.hadoop.hbase.client.Row;
*
*
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HTableUtil {
private static final int INITIAL_LIST_SIZE = 250;

@@ -27,6 +27,8 @@ import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
@@ -43,6 +45,8 @@ import org.apache.hadoop.io.Writable;
* to increment. At least one column to increment must be specified using the
* {@link #addColumn(byte[], byte[], long)} method.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Increment implements Row {
private static final byte INCREMENT_VERSION = (byte)2;

@@ -20,6 +20,9 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Specify Isolation levels in Scan operations.
* <p>
@@ -29,6 +32,8 @@ package org.apache.hadoop.hbase.client;
* should return data that is being modified by transactions that might
* not have been committed yet.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum IsolationLevel {
READ_COMMITTED(1),

@@ -28,6 +28,8 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -45,6 +47,8 @@ import org.apache.hadoop.hbase.util.Writables;
* Although public visibility, this is not a public-facing API and may evolve in
* minor releases.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MetaScanner {
private static final Log LOG = LogFactory.getLog(MetaScanner.class);
/**

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
@@ -36,6 +38,8 @@ import java.util.TreeMap;
* Container for Actions (i.e. Get, Delete, or Put), which are grouped by
* regionName. Intended to be used with HConnectionManager.processBatch()
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class MultiAction<R> implements Writable {
// map of regions to lists of puts/gets/deletes for that region.

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.util.Bytes;
@@ -42,6 +44,8 @@ import java.util.TreeSet;
* @deprecated Use MultiAction instead
* Data type class for putting multiple regions worth of puts in one RPC.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MultiPut extends Operation implements Writable {
public HServerAddress address; // client code ONLY

@@ -21,6 +21,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -42,6 +44,8 @@ import java.util.TreeMap;
/**
* A container for Result objects, grouped by regionName.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MultiResponse implements Writable {
// map of regionName to list of (Results paired to the original index for that

@@ -27,10 +27,14 @@ import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class Mutation extends OperationWithAttributes {
// Attribute used in Mutations to indicate the originating cluster.
private static final String CLUSTER_ID_ATTR = "_c.id_";

@@ -19,11 +19,15 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.RegionException;
/**
* Thrown when no region server can be found for a region
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class NoServerForRegionException extends RegionException {
private static final long serialVersionUID = 1L << 11 - 1L;

@@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
/**
@@ -29,6 +31,8 @@ import org.codehaus.jackson.map.ObjectMapper;
* (e.g. Put, Get, Delete, Scan, Next, etc.)
* Contains methods for exposure to logging and debugging tools.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class Operation {
// TODO make this configurable
private static final int DEFAULT_MAX_COLS = 5;

@@ -27,10 +27,14 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.WritableUtils;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class OperationWithAttributes extends Operation implements Attributes {
// a opaque blob of attributes
private Map<String, byte[]> attributes;

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.HeapSize;
@@ -43,6 +45,8 @@ import java.util.TreeMap;
* for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or
* {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Put extends Mutation
implements HeapSize, Writable, Row, Comparable<Row> {
private static final byte PUT_VERSION = (byte)2;

@@ -19,9 +19,13 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.RegionException;
/** Thrown when a table can not be located */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RegionOfflineException extends RegionException {
private static final long serialVersionUID = 466008402L;
/** default constructor */

@@ -31,6 +31,8 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -65,6 +67,8 @@ import org.apache.hadoop.io.Writable;
* through {@link KeyValue#getRow()}, {@link KeyValue#getFamily()}, {@link KeyValue#getQualifier()},
* {@link KeyValue#getTimestamp()}, and {@link KeyValue#getValue()}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Result implements Writable, WritableWithSize {
private static final byte RESULT_VERSION = (byte)1;

@@ -22,10 +22,15 @@ package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface for client-side scanning.
* Go to {@link HTable} to obtain instances.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ResultScanner extends Closeable, Iterable<Result> {
/**

@@ -19,10 +19,15 @@ import java.io.IOException;
import java.util.Date;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception thrown by HTable methods when an attempt to do something (like
* commit changes) fails after a bunch of retries.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RetriesExhaustedException extends IOException {
private static final long serialVersionUID = 1876775844L;

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
@@ -42,6 +44,8 @@ import java.util.Set;
* {@link #getCause(int)}, {@link #getRow(int)} and {@link #getAddress(int)}.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RetriesExhaustedWithDetailsException
extends RetriesExhaustedException {
List<Throwable> exceptions;

@@ -19,11 +19,15 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.WritableComparable;
/**
* Has a row.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Row extends WritableComparable<Row> {
/**
* @return The row.

@@ -19,9 +19,14 @@
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Holds row name and lock id.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RowLock {
private byte [] row = null;
private long lockId = -1L;

@@ -25,6 +25,8 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
@@ -36,6 +38,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* The mutations are performed in the order in which they
* were added.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RowMutations implements Row {
private List<Mutation> mutations = new ArrayList<Mutation>();
private byte [] row;

@@ -20,6 +20,8 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.filter.Filter;
@@ -80,6 +82,8 @@ import java.util.TreeSet;
* Expert: To explicitly disable server-side block caching for this scan,
* execute {@link #setCacheBlocks(boolean)}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Scan extends OperationWithAttributes implements Writable {
private static final String RAW_ATTR = "_raw_";
private static final String ISOLATION_LEVEL = "_isolationlevel_";

@@ -23,6 +23,8 @@ import java.net.UnknownHostException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -36,6 +38,8 @@ import org.apache.hadoop.net.DNS;
* Retries scanner operations such as create, next, etc.
* Used by {@link ResultScanner}s made by {@link HTable}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ScannerCallable extends ServerCallable<Result[]> {
private static final Log LOG = LogFactory.getLog(ScannerCallable.class);
private long scannerId = -1L;

@@ -20,11 +20,15 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.DoNotRetryIOException;
/**
* Thrown when a scanner has timed out.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ScannerTimeoutException extends DoNotRetryIOException {
private static final long serialVersionUID = 8788838690290688313L;

@@ -28,6 +28,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
@@ -48,6 +50,8 @@ import org.apache.hadoop.ipc.RemoteException;
* @see HConnection#getRegionServerWithoutRetries(ServerCallable)
* @param <T> the class that the ServerCallable handles
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ServerCallable<T> implements Callable<T> {
protected final HConnection connection;
protected final byte [] tableName;

@@ -20,12 +20,16 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.hfile.Compression;
/**
* Immutable HColumnDescriptor
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
/**

@@ -20,8 +20,12 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HRegionInfo;
@InterfaceAudience.Public
@InterfaceStability.Evolving
class UnmodifyableHRegionInfo extends HRegionInfo {
/*
* Creates an unmodifyable copy of an HRegionInfo

@@ -20,12 +20,16 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
/**
* Read-only table descriptor.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class UnmodifyableHTableDescriptor extends HTableDescriptor {
/** Default constructor */
public UnmodifyableHTableDescriptor() {

@@ -31,6 +31,8 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@@ -62,6 +64,8 @@ import org.apache.hadoop.hbase.util.Pair;
* parameter type. For average and std, it returns a double value. For row
* count, it returns a long value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AggregationClient {
private static final Log log = LogFactory.getLog(AggregationClient.class);

@@ -23,6 +23,8 @@ package org.apache.hadoop.hbase.client.coprocessor;
import org.apache.commons.lang.reflect.MethodUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import java.io.IOException;
@@ -36,6 +38,8 @@ import java.lang.reflect.Proxy;
* A collection of interfaces and utilities used for interacting with custom RPC
* interfaces exposed by Coprocessors.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class Batch {
private static Log LOG = LogFactory.getLog(Batch.class);

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.client.coprocessor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Row;
@@ -51,6 +53,8 @@ import java.lang.reflect.Method;
* @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
* @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Exec extends Invocation implements Row {
/** Row key used as a reference for any region lookups */
private byte[] referenceRow;

@@ -19,6 +19,8 @@
*/
package org.apache.hadoop.hbase.client.coprocessor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.io.HbaseObjectWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Classes;
@@ -46,6 +48,8 @@ import java.io.Serializable;
* @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)
* @see org.apache.hadoop.hbase.client.HTable#coprocessorExec(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExecResult implements Writable {
private byte[] regionName;
private Object value;

@@ -23,6 +23,8 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;
@@ -35,6 +37,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* TestAggregateProtocol methods for its sample usage.
* Its methods handle null arguments gracefully.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class LongColumnInterpreter implements ColumnInterpreter<Long, Long> {
public Long getValue(byte[] colFamily, byte[] colQualifier, KeyValue kv)

@@ -26,6 +26,8 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.metrics.util.MetricsBase;
@@ -45,6 +47,8 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
* However, there is no need for this. So they are defined under scan operation
* for now.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ScanMetrics implements Writable {
private static final byte SCANMETRICS_VERSION = (byte)1;

@@ -17,12 +17,14 @@
*/
package org.apache.hadoop.hbase.constraint;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configured;
/**
* Base class to use when actually implementing a {@link Constraint}. It takes
* care of getting and setting of configuration for the constraint.
*/
@InterfaceAudience.Private
public abstract class BaseConstraint extends Configured implements Constraint {
}

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.constraint;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hbase.client.Put;
* @see BaseConstraint
* @see Constraints
*/
@InterfaceAudience.Private
public interface Constraint extends Configurable {
/**

Some files were not shown because too many files have changed in this diff.