HBASE-12526 Remove unused imports (Varun Saxena)
parent 5610732173
commit a5169d422b
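The change applies one pattern throughout: when a class was referenced only from Javadoc, its now-unused import is dropped and the {@link} tag is rewritten with the fully qualified class name so the reference still resolves. A minimal before/after sketch of that pattern, using a hypothetical Example class that is not part of this commit:

-import org.apache.hadoop.hbase.util.Example;
 /**
- * Helper documented against {@link Example}.
+ * Helper documented against {@link org.apache.hadoop.hbase.util.Example}.
  */
 public class Helper {
 }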
@@ -21,21 +21,20 @@ import java.lang.annotation.Documented;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
-import org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate;
-import org.apache.hadoop.hbase.classification.InterfaceAudience.Private;
-import org.apache.hadoop.hbase.classification.InterfaceAudience.Public;

 /**
 * Annotation to inform users of how much to rely on a particular package,
 * class or method not changing over time. Currently the stability can be
 * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
 *
-* <ul><li>All classes that are annotated with {@link Public} or
-* {@link LimitedPrivate} must have InterfaceStability annotation. </li>
-* <li>Classes that are {@link Private} are to be considered unstable unless
-* a different InterfaceStability annotation states otherwise.</li>
-* <li>Incompatible changes must not be made to classes marked as stable.</li>
-* </ul>
+* <ul><li>All classes that are annotated with
+* {@link org.apache.hadoop.hbase.classification.InterfaceAudience.Public} or
+* {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+* must have InterfaceStability annotation. </li> <li>Classes that are
+* {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+* are to be considered unstable unless a different InterfaceStability annotation
+* states otherwise.</li> <li>Incompatible changes must not be made to classes
+* marked as stable.</li> </ul>
 */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;

-import java.io.IOException;

 /**
 * Thrown if a request is table schema modification is requested but

@@ -23,7 +23,6 @@ package org.apache.hadoop.hbase;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Strings;

@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;

 /**
 * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by

@@ -30,13 +30,11 @@ import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -20,11 +20,6 @@ package org.apache.hadoop.hbase.client;

 import java.io.IOException;
 import java.lang.management.ManagementFactory;
-import java.net.Inet4Address;
-import java.net.Inet6Address;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.util.Enumeration;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;

@@ -139,7 +139,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;

@@ -24,7 +24,6 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.security.User;

 /**
@@ -19,16 +19,12 @@

 package org.apache.hadoop.hbase.client;

-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;

 /**
 * A container for Result objects, grouped by regionName.

@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.RegionException;

 /**
 * Thrown when no region server can be found for a region

@@ -24,7 +24,6 @@ import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.util.Bytes;


@@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import java.io.IOException;
 import java.util.Date;
 import java.util.List;
-import java.util.concurrent.Callable;

 /**
 * Exception thrown by HTable methods when an attempt to do something (like

@@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation;

 /**
 * Factory implementation to provide the {@link HConnectionImplementation} with

@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;

-import com.google.protobuf.RpcController;

 /**
 * A reversed ScannerCallable which supports backward scanning.
@@ -55,7 +54,8 @@ public class ReversedScannerCallable extends ScannerCallable {
 * @param scan
 * @param scanMetrics
 * @param locateStartRow The start row for locating regions
-* @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+* @param rpcFactory to create an
+* {@link com.google.protobuf.RpcController} to talk to the regionserver
 */
 public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
 ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory rpcFactory) {

@@ -69,7 +69,8 @@ public class ReversedScannerCallable extends ScannerCallable {
 * @param scan
 * @param scanMetrics
 * @param locateStartRow The start row for locating regions
-* @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+* @param rpcFactory to create an
+* {@link com.google.protobuf.RpcController} to talk to the regionserver
 * @param replicaId the replica id
 */
 public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,

@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.RegionException;

 /**
 * Gets or Scans throw this exception if running without in-row scan flag

@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNS;

-import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;

@@ -95,9 +94,10 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
 * @param connection which connection
 * @param tableName table callable is on
 * @param scan the scan to execute
-* @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
-* metrics
-* @param rpcControllerFactory factory to use when creating {@link RpcController}
+* @param scanMetrics the ScanMetrics to used, if it is null,
+* ScannerCallable won't collect metrics
+* @param rpcControllerFactory factory to use when creating
+* {@link com.google.protobuf.RpcController}
 */
 public ScannerCallable (ClusterConnection connection, TableName tableName, Scan scan,
 ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {

@@ -23,7 +23,6 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.DoubleMsg;

@@ -22,7 +22,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -23,9 +23,6 @@ import java.io.IOException;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;

 import com.google.protobuf.Message;

@@ -33,7 +30,8 @@ import com.google.protobuf.Message;
 * Defines how value for specific column is interpreted and provides utility
 * methods like compare, add, multiply etc for them. Takes column family, column
 * qualifier and return the cell value. Its concrete implementation should
-* handle null case gracefully. Refer to {@link LongColumnInterpreter} for an
+* handle null case gracefully. Refer to
+* {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} for an
 * example.
 * <p>
 * Takes two generic parameters and three Message parameters.

@@ -130,7 +128,8 @@ Q extends Message, R extends Message> {
 * server side to construct the ColumnInterpreter. The server
 * will pass this to the {@link #initialize}
 * method. If there is no ColumnInterpreter specific data (for e.g.,
-* {@link LongColumnInterpreter}) then null should be returned.
+* {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter})
+* then null should be returned.
 * @return the PB message
 */
 public abstract P getRequestData();

@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.exceptions;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.RegionException;

 import org.apache.hadoop.hbase.client.DoNotRetryRegionException;

@@ -23,7 +23,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.ipc.RemoteException;

 /**
 * Subclass if the server knows the region is now on another server.
@@ -213,7 +213,7 @@ public abstract class Filter {
 * @return KeyValue which must be next seeked. return null if the filter is not sure which key to
 * seek to next.
 * @throws IOException
-* @Deprecated Use {@link #getNextCellHint(Cell)} instead.
+* Function is Deprecated. Use {@link #getNextCellHint(Cell)} instead.
 */
 @Deprecated
 abstract public KeyValue getNextKeyHint(final KeyValue currentKV) throws IOException;

@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

@@ -42,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
 * <p>
 * Multiple filters can be combined using {@link FilterList}.
 * <p>
-* If an already known column qualifier is looked for, use {@link Get#addColumn}
+* If an already known column qualifier is looked for, use
+* {@link org.apache.hadoop.hbase.client.Get#addColumn}
 * directly rather than a filter.
 */
 @InterfaceAudience.Public

@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;

@@ -41,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
 * <p>
 * Multiple filters can be combined using {@link FilterList}.
 * <p>
-* If an already known row range needs to be scanned, use {@link Scan} start
+* If an already known row range needs to be scanned, use
+* {@link org.apache.hadoop.hbase.CellScanner} start
 * and stop rows directly rather than a filter.
 */
 @InterfaceAudience.Public
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

@@ -53,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
 * long value), then you can pass in your own comparator instead.
 * <p>
 * You must also specify a family and qualifier. Only the value of this column
-* will be tested. When using this filter on a {@link Scan} with specified
+* will be tested. When using this filter on a
+* {@link org.apache.hadoop.hbase.CellScanner} with specified
 * inputs, the column to be tested should also be added as input (otherwise
 * the filter will regard the column as missing).
 * <p>

@@ -28,7 +28,6 @@ import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcChannel;
 import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -37,8 +36,8 @@ import java.io.IOException;

 /**
 * Base class which provides clients with an RPC connection to
-* call coprocessor endpoint {@link Service}s. Note that clients should not use this class
-* directly, except through
+* call coprocessor endpoint {@link com.google.protobuf.Service}s.
+* Note that clients should not use this class directly, except through
 * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}.
 */
 @InterfaceAudience.Public
@@ -19,12 +19,12 @@ package org.apache.hadoop.hbase.ipc;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.ipc.RemoteException;

 /**
 * A {@link RemoteException} with some extra information. If source exception
-* was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true.
+* was a {@link org.apache.hadoop.hbase.DoNotRetryIOException},
+* {@link #isDoNotRetry()} will return true.
 * <p>A {@link RemoteException} hosts exceptions we got from the server.
 */
 @SuppressWarnings("serial")

@@ -23,11 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.util.StringUtils;

-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Message;
 import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
 import org.apache.hadoop.util.StringUtils;

 import java.io.IOException;

@@ -35,10 +32,11 @@ import java.io.IOException;
 /**
 * Used for server-side protobuf RPC service invocations. This handler allows
 * invocation exceptions to easily be passed through to the RPC server from coprocessor
-* {@link Service} implementations.
+* {@link com.google.protobuf.Service} implementations.
 *
 * <p>
-* When implementing {@link Service} defined methods, coprocessor endpoints can use the following
+* When implementing {@link com.google.protobuf.Service} defined methods,
+* coprocessor endpoints can use the following
 * pattern to pass exceptions back to the RPC client:
 * <code>
 * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {

@@ -59,7 +57,8 @@ import java.io.IOException;
 public class ServerRpcController implements RpcController {
 /**
 * The exception thrown within
-* {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
+* {@link com.google.protobuf.Service#callMethod(
+* Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
 * if any.
 */
 // TODO: it would be good widen this to just Throwable, but IOException is what we allow now

@@ -103,7 +102,7 @@ public class ServerRpcController implements RpcController {
 }

 /**
-* Sets an exception to be communicated back to the {@link Service} client.
+* Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
 * @param ioe the exception encountered during execution of the service method
 */
 public void setFailedOn(IOException ioe) {
@@ -35,7 +35,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableSet;
-import java.util.concurrent.TimeUnit;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;

@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRes
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.security.access.UserPermission;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;

 import com.google.protobuf.ByteString;

@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.replication;

 import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;

@@ -282,7 +281,7 @@ public class PoolMap<K, V> implements Map<K, V> {

 /**
 * The <code>ReusablePool</code> represents a {@link PoolMap.Pool} that builds
-* on the {@link LinkedList} class. It essentially allows resources to be
+* on the {@link java.util.LinkedList} class. It essentially allows resources to be
 * checked out, at which point it is removed from this pool. When the resource
 * is no longer required, it should be returned to the pool in order to be
 * reused.
@@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.Code;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.data.Stat;

 // We should not be importing this Type here, nor a RegionTransition, etc. This class should be

@@ -123,7 +121,8 @@ public class ZKAssign {
 * Creates a new unassigned node in the OFFLINE state for the specified region.
 *
 * <p>Does not transition nodes from other states. If a node already exists
-* for this region, a {@link NodeExistsException} will be thrown.
+* for this region, a {@link org.apache.zookeeper.KeeperException.NodeExistsException}
+* will be thrown.
 *
 * <p>Sets a watcher on the unassigned region node if the method is successful.
 *

@@ -247,7 +246,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -275,7 +274,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -302,7 +301,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -330,7 +329,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -359,7 +358,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -387,7 +386,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -417,7 +416,7 @@ public class ZKAssign {
 * specified region.
 *
 * <p>If a node does not already exist for this region, a
-* {@link NoNodeException} will be thrown.
+* {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown.
 *
 * <p>No watcher is set whether this succeeds or not.
 *

@@ -504,7 +503,8 @@ public class ZKAssign {
 * region.
 *
 * <p>Does not transition nodes from any states. If a node already exists
-* for this region, a {@link NodeExistsException} will be thrown.
+* for this region, a {@link org.apache.zookeeper.KeeperException.NodeExistsException}
+* will be thrown.
 *
 * <p>If creation is successful, returns the version number of the CLOSING
 * node created.
@@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.zookeeper;

 import java.io.IOException;
 import java.io.InputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;

@@ -32,7 +30,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.util.StringUtils;

 /**
 * Utility methods for reading, and building the ZooKeeper configuration.

@@ -24,18 +24,17 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.io.CellOutputStream;
-import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;

 /**
 * Encoder/Decoder for Cell.
 *
-* <p>Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based
-* and without presuming an hfile context. Intent is an Interface that will work for hfile and
-* rpc.
+* <p>Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
+* only Cell-based rather than KeyValue version 1 based and without presuming
+* an hfile context. Intent is an Interface that will work for hfile and rpc.
 */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 public interface Codec {
-// TODO: interfacing with {@link DataBlockEncoder}
+// TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
 /**
 * Call flush when done. Some encoders may not put anything on the stream until flush is called.
 * On flush, let go of any resources used by the encoder.
@@ -23,15 +23,14 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScanner;

 /**
 * Accepts a stream of Cells. This can be used to build a block of cells during compactions
 * and flushes, or to build a byte[] to send to the client. This could be backed by a
 * List<KeyValue>, but more efficient implementations will append results to a
 * byte[] to eliminate overhead, and possibly encode the cells further.
-* <p>To read Cells, use {@link CellScanner}
-* @see CellScanner
+* <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
+* @see org.apache.hadoop.hbase.CellScanner
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

@@ -73,7 +73,7 @@ public class HeapMemorySizeUtil {

 /**
 * Retrieve global memstore configured size as percentage of total heap.
-* @param conf
+* @param c
 * @param logInvalid
 */
 public static float getGlobalMemStorePercent(final Configuration c, final boolean logInvalid) {

@@ -91,7 +91,7 @@ public class HeapMemorySizeUtil {

 /**
 * Retrieve configured size for global memstore lower water mark as percentage of total heap.
-* @param conf
+* @param c
 * @param globalMemStorePercent
 */
 public static float getGlobalMemStoreLowerMark(final Configuration c, float globalMemStorePercent) {
@@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Order;
 import org.apache.hadoop.hbase.util.PositionedByteRange;
-import org.apache.hadoop.io.Writable;

 /**
 * <p>

@@ -33,7 +32,8 @@ import org.apache.hadoop.io.Writable;
 * qualifiers.
 * </p>
 * <p>
-* {@code DataType}s are different from Hadoop {@link Writable}s in two
+* {@code DataType}s are different from Hadoop
+* {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable}s in two
 * significant ways. First, {@code DataType} describes how to serialize a
 * value, it does not encapsulate a serialized value. Second, {@code DataType}
 * implementations provide hints to consumers about relationships between the

@@ -19,15 +19,15 @@ package org.apache.hadoop.hbase.types;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Order;
 import org.apache.hadoop.hbase.util.PositionedByteRange;

 /**
 * An {@code DataType} that encodes fixed-length values encoded using
-* {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it
-* easier to transition away from direct use of {@link Bytes}.
-* @see Bytes#putBytes(byte[], int, byte[], int, int)
+* {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+* Intended to make it easier to transition away from direct use of
+* {@link org.apache.hadoop.hbase.util.Bytes}.
+* @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
 * @see RawBytes
 * @see OrderedBlob
 * @see OrderedBlobVar
@@ -19,16 +19,16 @@ package org.apache.hadoop.hbase.types;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Order;
 import org.apache.hadoop.hbase.util.PositionedByteRange;

 /**
 * An {@code DataType} that encodes variable-length values encoded using
-* {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a
-* termination marker following the raw {@code byte[]} value. Intended to
-* make it easier to transition away from direct use of {@link Bytes}.
-* @see Bytes#putBytes(byte[], int, byte[], int, int)
+* {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+* Includes a termination marker following the raw {@code byte[]} value. Intended to
+* make it easier to transition away from direct use of
+* {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable}.
+* @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
 * @see RawBytes
 * @see OrderedBlob
 */

@@ -19,15 +19,15 @@ package org.apache.hadoop.hbase.types;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Order;

 /**
 * An {@code DataType} that encodes fixed-length values encoded using
-* {@link Bytes#toBytes(String)}. Intended to make it easier to transition
-* away from direct use of {@link Bytes}.
-* @see Bytes#toBytes(String)
-* @see Bytes#toString(byte[], int, int)
+* {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+* Intended to make it easier to transition away from direct use of
+* {@link org.apache.hadoop.hbase.util.Bytes}.
+* @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+* @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
 * @see RawString
 */
 @InterfaceAudience.Public

@@ -19,16 +19,16 @@ package org.apache.hadoop.hbase.types;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Order;

 /**
 * An {@code DataType} that encodes variable-length values encoded using
-* {@link Bytes#toBytes(String)}. Includes a termination marker following the
-* raw {@code byte[]} value. Intended to make it easier to transition
-* away from direct use of {@link Bytes}.
-* @see Bytes#toBytes(String)
-* @see Bytes#toString(byte[], int, int)
+* {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+* Includes a termination marker following the raw {@code byte[]} value.
+* Intended to make it easier to transition away from direct use of
+* {@link org.apache.hadoop.hbase.util.Bytes}.
+* @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+* @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
 * @see RawString
 * @see OrderedString
 */
@@ -17,7 +17,6 @@
 */
 package org.apache.hadoop.hbase.util;

-import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;

@@ -36,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
 PositionedByteRange {
 /**
-* The current index into the range. Like {@link ByteBuffer} position, it
+* The current index into the range. Like {@link java.nio.ByteBuffer} position, it
 * points to the next value that will be read/written in the array. It
 * provides the appearance of being 0-indexed, even though its value is
 * calculated according to offset.

@@ -183,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
 @Override
 public abstract int putVLong(long val);
 /**
-* Similar to {@link ByteBuffer#flip()}. Sets length to position, position to
+* Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to
 * offset.
 */
 @VisibleForTesting

@@ -195,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
 }

 /**
-* Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to
+* Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to
 * capacity.
 */
 @VisibleForTesting
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.ByteArrayOutputStream;
-import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
@@ -1510,7 +1509,7 @@ public class Bytes {
  * @param b bytes to hash
  * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
  * passed in array. This method is what {@link org.apache.hadoop.io.Text} and
- * {@link ImmutableBytesWritable} use calculating hash code.
+ * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable} use calculating hash code.
  */
 public static int hashCode(final byte [] b) {
 return hashCode(b, b.length);
@@ -1521,7 +1520,7 @@ public class Bytes {
  * @param length length of the value
  * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
  * passed in array. This method is what {@link org.apache.hadoop.io.Text} and
- * {@link ImmutableBytesWritable} use calculating hash code.
+ * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable} use calculating hash code.
  */
 public static int hashCode(final byte [] b, final int length) {
 return WritableComparator.hashBytes(b, length);
@@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.util;
 
 
 import com.google.common.base.Supplier;
-import com.google.common.collect.Multiset;
 
 import java.util.Comparator;
-import java.util.ConcurrentModificationException;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -35,7 +33,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * A simple concurrent map of sets. This is similar in concept to
- * {@link Multiset}, with the following exceptions:
+ * {@link com.google.common.collect.Multiset}, with the following exceptions:
  * <ul>
  * <li>The set is thread-safe and concurrent: no external locking or
  * synchronization is required. This is important for the use case where
@@ -111,7 +109,7 @@ public class ConcurrentIndex<K, V> {
  * associated. <b>Note:</b> if the caller wishes to add or removes values
  * to under the specified as they're iterating through the returned value,
  * they should make a defensive copy; otherwise, a
- * {@link ConcurrentModificationException} may be thrown.
+ * {@link java.util.ConcurrentModificationException} may be thrown.
  * @param key The key
  * @return All values associated with the specified key or null if no values
  * are associated with the key.
@@ -25,7 +25,6 @@ import java.math.BigInteger;
 import java.math.MathContext;
 import java.math.RoundingMode;
 import java.nio.charset.Charset;
-import java.util.Comparator;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.util;
 
-import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -32,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * their own index into the array.
  * </p>
  * <p>
- * Designed to be a slimmed-down, mutable alternative to {@link ByteBuffer}.
+ * Designed to be a slimmed-down, mutable alternative to {@link java.nio.ByteBuffer}.
  * </p>
  */
 @InterfaceAudience.Public
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.util;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HConstants;
 
 @InterfaceAudience.Private
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -53,7 +51,8 @@ import org.apache.zookeeper.ZooKeeper;
  * This is an example showing how a RegionObserver could configured
  * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
  *
- * This also demonstrated the use of shared {@link RegionObserver} state.
+ * This also demonstrated the use of shared
+ * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
  * See {@link RegionCoprocessorEnvironment#getSharedData()}.
  *
  * This would be useful for an incremental backup tool, which would indicate the last
@@ -61,7 +60,8 @@ import org.apache.zookeeper.ZooKeeper;
  * inserted since (based on wall clock time).
  *
  * This implements org.apache.zookeeper.Watcher directly instead of using
- * {@link ZooKeeperWatcher}, because RegionObservers come and go and currently
+ * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher},
+ * because RegionObservers come and go and currently
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
 public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
@@ -25,7 +25,6 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.MetaComparator;
@@ -18,13 +18,11 @@
 package org.apache.hadoop.hbase;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.protobuf.InvalidProtocolBufferException;
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * Get, remove and modify table descriptors.
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.zookeeper.KeeperException;
 
@@ -36,9 +35,10 @@ import org.apache.zookeeper.KeeperException;
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the {@link TimeToLiveHFileCleaner} is also enabled (it always should
- * be), since it may take a little time for the ZK notification to propagate, in which case we may
- * accidentally delete some files.
+ * This only works properly if the
+ * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner} is also enabled
+ * (it always should be), since it may take a little time for the ZK notification to
+ * propagate, in which case we may accidentally delete some files.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementatio
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 
 /**
@@ -34,10 +34,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
@@ -51,9 +49,10 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this scanner can be used to
- * run the scan directly over the snapshot files. The snapshot should not be deleted while there
- * are open scanners reading from snapshot files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
+ * and this scanner can be used to run the scan directly over the snapshot files.
+ * The snapshot should not be deleted while there are open scanners reading from snapshot
+ * files.
  *
  * <p>
  * An internal RegionScanner is used to execute the {@link Scan} obtained
@@ -67,7 +66,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * snapshot files, the job has to be run as the HBase user or the user must have group or other
  * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from
  * snapshot/data files will completely circumvent the access control enforced by HBase.
- * @see TableSnapshotInputFormat
+ * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.constraint;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 
 /**
@@ -18,15 +18,14 @@
 package org.apache.hadoop.hbase.constraint;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.client.Put;
 
 /**
- * Exception that a user defined constraint throws on failure of a {@link Put}.
- * <p>
- * Does <b>NOT</b> attempt the {@link Put} multiple times, since the constraint
- * <it>should</it> fail every time for the same {@link Put} (it should be
- * idempotent).
+ * Exception that a user defined constraint throws on failure of a
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ * <p>Does <b>NOT</b> attempt the
+ * {@link org.apache.hadoop.hbase.client.Put} multiple times,
+ * since the constraint<it>should</it> fail every time for the same
+ * {@link org.apache.hadoop.hbase.client.Put} (it should be idempotent).
  */
 @InterfaceAudience.Private
 public class ConstraintException extends org.apache.hadoop.hbase.DoNotRetryIOException {
@@ -15,10 +15,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 
 /**
@@ -91,7 +89,8 @@ public interface RegionMergeCoordination {
  * @param rmd region merge details
  * @param mergedRegion
  * @throws IOException If thrown, transaction failed. Call
- * {@link RegionMergeTransaction#rollback(Server, RegionServerServices)}
+ * {@link org.apache.hadoop.hbase.regionserver.RegionMergeTransaction#rollback(
+ * Server, RegionServerServices)}
  */
 void completeRegionMergeTransaction(RegionServerServices services, HRegionInfo merged,
 HRegion region_a, HRegion region_b, RegionMergeDetails rmd, HRegion mergedRegion)
@@ -27,7 +27,6 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
@@ -43,11 +42,11 @@ import com.google.common.annotations.VisibleForTesting;
  * <P>
  * Methods required for task life circle: <BR>
  * {@link #markRegionsRecovering(ServerName, Set)} mark regions for log replaying. Used by
- * {@link MasterFileSystem} <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} <BR>
  * {@link #removeRecoveringRegions(Set, Boolean)} make regions cleanup that previous were marked as
 * recovering. Called after all tasks processed <BR>
 * {@link #removeStaleRecoveringRegions(Set)} remove stale recovering. called by
- * {@link MasterFileSystem} after Active Master is initialized <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} after Active Master is initialized <BR>
 * {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
 * recovery has been made<BR>
 * {@link #checkTaskStillAvailable(String)} Check that task is still there <BR>
@@ -29,12 +29,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSeq
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
 import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
-import org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler;
 
 import com.google.common.annotations.VisibleForTesting;
 
 /**
- * Coordinated operations for {@link SplitLogWorker} and {@link HLogSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and
+ * {@link org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler} Important
  * methods for SplitLogWorker: <BR>
  * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
  * ready to supply the tasks <BR>
@@ -23,11 +23,9 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.SplitTransaction;
 
 /**
  * Coordination operations for split transaction. The split operation should be coordinated at the
@@ -82,7 +80,8 @@ public interface SplitTransactionCoordination {
  * @param std split transaction details
  * @param parent
  * @throws IOException If thrown, transaction failed. Call
- * {@link SplitTransaction#rollback(Server, RegionServerServices)}
+ * {@link org.apache.hadoop.hbase.regionserver.
+ * SplitTransaction#rollback(Server, RegionServerServices)}
  */
 void completeSplitTransaction(RegionServerServices services, HRegion first,
 HRegion second, SplitTransactionDetails std, HRegion parent) throws IOException;
@@ -43,17 +43,14 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
-import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -70,7 +67,8 @@ import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 
 /**
- * ZooKeeper based implementation of {@link SplitLogManagerCoordination}
+ * ZooKeeper based implementation of
+ * {@link org.apache.hadoop.hbase.master.SplitLogManagerCoordination}
  */
 @InterfaceAudience.Private
 public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@@ -685,7 +683,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
 /**
  * ZooKeeper implementation of
- * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+ * {@link org.apache.hadoop.hbase.master.
+ * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
  */
 @Override
 public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
@@ -905,8 +904,10 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 
 
 /**
- * {@link SplitLogManager} can use objects implementing this interface to finish off a partially
- * done task by {@link SplitLogWorker}. This provides a serialization point at the end of the task
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use
+ * objects implementing this interface to finish off a partially
+ * done task by {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}.
+ * This provides a serialization point at the end of the task
  * processing. Must be restartable and idempotent.
  */
 public interface TaskFinisher {
@@ -1068,7 +1069,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
  * Asynchronous handler for zk create RESCAN-node results. Retries on failures.
  * <p>
  * A RESCAN node is created using PERSISTENT_SEQUENTIAL flag. It is a signal for all the
- * {@link SplitLogWorker}s to rescan for new tasks.
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}s to rescan for new tasks.
  */
 public class CreateRescanAsyncCallback implements AsyncCallback.StringCallback {
 private final Log LOG = LogFactory.getLog(CreateRescanAsyncCallback.class);
@@ -23,13 +23,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionTransition;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.regionserver.SplitTransaction;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -211,7 +209,8 @@ public class ZKSplitTransactionCoordination implements SplitTransactionCoordinat
  * @param std split transaction details
  * @param parent
  * @throws IOException If thrown, transaction failed. Call
- * {@link SplitTransaction#rollback(Server, RegionServerServices)}
+ * {@link org.apache.hadoop.hbase.regionserver.SplitTransaction#rollback(
+ * Server, RegionServerServices)}
  */
 @Override
 public void completeSplitTransaction(final RegionServerServices services, HRegion a, HRegion b,
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.data.Stat;
 
 public class ZkRegionMergeCoordination implements RegionMergeCoordination {
@@ -161,7 +160,7 @@ public class ZkRegionMergeCoordination implements RegionMergeCoordination {
  *
  * <p>
  * Does not transition nodes from other states. If a node already exists for
- * this region, a {@link NodeExistsException} will be thrown.
+ * this region, a {@link org.apache.zookeeper.KeeperException.NodeExistsException} will be thrown.
  *
  * @param region region to be created as offline
  * @param serverName server event originates from
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -584,7 +583,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
  * Next part is related to HLogSplitterHandler
  */
 /**
- * endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
+ * endTask() can fail and the only way to recover out of it is for the
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to
  * timeout the task node.
  * @param slt
  * @param ctr
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.HTableWrapper;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet;
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -48,12 +47,10 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.regionserver.OperationStatus;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -209,7 +206,8 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped
  * scanner, applying its own policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
@@ -235,7 +233,8 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped
  * scanner, applying its own policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
@@ -266,7 +265,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files
@@ -290,7 +290,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.errorhandling;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 
 /**
  * This is an interface for a cooperative exception throwing mechanism. Implementations are
@@ -41,12 +41,10 @@ import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.CompoundBloomFilter;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
@@ -341,9 +339,10 @@ public class HFileBlock implements Cacheable {
 /**
  * Returns the buffer this block stores internally. The clients must not
  * modify the buffer object. This method has to be public because it is
- * used in {@link CompoundBloomFilter} to avoid object creation on every
- * Bloom filter lookup, but has to be used with caution. Checksum data
- * is not included in the returned buffer but header data is.
+ * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter}
+ * to avoid object creation on every Bloom filter lookup, but has to
+ * be used with caution. Checksum data is not included in the returned
+ * buffer but header data is.
  *
  * @return the buffer of this block for read-only operations
  */
@@ -356,7 +355,7 @@ public class HFileBlock implements Cacheable {
 /**
  * Returns the buffer of this block, including header data. The clients must
  * not modify the buffer object. This method has to be public because it is
- * used in {@link BucketCache} to avoid buffer copy.
+ * used in {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} to avoid buffer copy.
  *
  * @return the buffer with header and checksum included for read-only operations
  */
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.io.hfile.HFile.CachingBlockReader;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.util.StringUtils;
 
@@ -55,9 +54,9 @@ import org.apache.hadoop.util.StringUtils;
  * ({@link BlockIndexReader}) single-level and multi-level block indexes.
  *
  * Examples of how to use the block index writer can be found in
- * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
- * to use the reader can be found in {@link HFileReaderV2} and
- * TestHFileBlockIndex.
+ * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter}
+ * and {@link HFileWriterV2}. Examples of how to use the reader can be
+ * found in {@link HFileReaderV2} and TestHFileBlockIndex.
  */
 @InterfaceAudience.Private
 public class HFileBlockIndex {
@ -21,7 +21,6 @@ import java.io.IOException;
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.hbase.Cell;
|
import org.apache.hadoop.hbase.Cell;
|
||||||
import org.apache.hadoop.hbase.KeyValue;
|
|
||||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
|
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
|
||||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||||
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
|
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
|
||||||
|
|
|
@@ -23,7 +23,6 @@ import java.nio.ByteBuffer;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;

 /**
 * A scanner allows you to position yourself within a HFile and
@@ -132,7 +131,7 @@ public interface HFileScanner {
 */
 ByteBuffer getValue();
 /**
- * @return Instance of {@link KeyValue}.
+ * @return Instance of {@link org.apache.hadoop.hbase.KeyValue}.
 */
 Cell getKeyValue();
 /**
@@ -39,7 +39,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -59,11 +58,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
 *
 * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
+ * scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An
 * in-memory column family is a column family that should be served from memory if possible):
 * single-access, multiple-accesses, and in-memory priority.
 * A block is added with an in-memory priority flag if
- * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()},
+ * otherwise a block becomes a single access
 * priority the first time it is read into this block cache. If a block is accessed again while
 * in cache, it is marked as a multiple access priority block. This delineation of blocks is used
 * to prevent scans from thrashing the cache adding a least-frequently-used
@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.io.hfile.Cacheable;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.util.ConcurrentIndex;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
@ -83,8 +82,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||||
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
|
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
|
||||||
*
|
*
|
||||||
* <p>BucketCache can be used as mainly a block cache (see
|
* <p>BucketCache can be used as mainly a block cache (see
|
||||||
* {@link CombinedBlockCache}), combined with LruBlockCache to decrease CMS GC and
|
* {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}),
|
||||||
* heap fragmentation.
|
* combined with LruBlockCache to decrease CMS GC and heap fragmentation.
|
||||||
*
|
*
|
||||||
* <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
|
* <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
|
||||||
* blocks) to enlarge cache space via
|
* blocks) to enlarge cache space via
|
||||||
|
|
|
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.ipc;

 import java.util.ArrayList;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;

@@ -32,14 +32,13 @@ import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;

 /**
 * A job with a map to count rows.
 * Map outputs table rows IF the input row has columns that have content.
- * Uses an {@link IdentityReducer}
+ * Uses org.apache.hadoop.mapred.lib.IdentityReducer
 */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
@@ -104,11 +103,12 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 * Calculates the splits that will serve as input for the map tasks.
 * <ul>
 * Splits are created in number equal to the smallest between numSplits and
- * the number of {@link HRegion}s in the table. If the number of splits is
- * smaller than the number of {@link HRegion}s then splits are spanned across
- * multiple {@link HRegion}s and are grouped the most evenly possible. In the
- * case splits are uneven the bigger splits are placed first in the
- * {@link InputSplit} array.
+ * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s
+ * in the table. If the number of splits is smaller than the number of
+ * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are
+ * spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
+ * and are grouped the most evenly possible. In the case splits are uneven the
+ * bigger splits are placed first in the {@link InputSplit} array.
 *
 * @param job the map task {@link JobConf}
 * @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
@@ -22,7 +22,6 @@ import java.io.IOException;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -60,7 +59,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
 }

 /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
 public void setHTable(Table htable) {
 this.recordReaderImpl.setHTable(htable);
@@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -114,7 +113,7 @@ public class TableRecordReaderImpl {
 return this.startRow;
 }
 /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
 public void setHTable(Table htable) {
 Configuration conf = htable.getConfiguration();
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.RegexStringComparator;
 import org.apache.hadoop.hbase.filter.RowFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
@@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Result;
@@ -26,12 +26,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.OutputFormat;

 /**
 * Convenience class that simply writes all values (which must be
- * {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} instances)
+ * {@link org.apache.hadoop.hbase.client.Put} or
+ * {@link org.apache.hadoop.hbase.client.Delete} instances)
 * passed to it out to the configured HBase table. This works in combination
 * with {@link TableOutputFormat} which actually does the writing to HBase.<p>
 *
@@ -46,8 +45,8 @@ import org.apache.hadoop.mapreduce.OutputFormat;
 * </code></blockquote>
 * This will also set the proper {@link TableOutputFormat} which is given the
 * <code>table</code> parameter. The
- * {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} define the
+ * {@link org.apache.hadoop.hbase.client.Put} or
+ * {@link org.apache.hadoop.hbase.client.Delete} define the
 * row and columns implicitly.
 */
 @InterfaceAudience.Public
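For reference, a minimal sketch (not part of this commit) of wiring IdentityTableReducer into a job the way the class javadoc above describes. The table name "myTable" and job name are made up for the example; a mapper emitting Put or Delete values is assumed to be configured separately.

// Illustrative only: initTableReducerJob sets TableOutputFormat for "myTable";
// IdentityTableReducer then passes each Put/Delete through unchanged.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class IdentityReduceSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "write-puts-and-deletes");
    TableMapReduceUtil.initTableReducerJob("myTable", IdentityTableReducer.class, job);
    // ... a mapper that emits Put or Delete values would be configured here ...
  }
}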
@@ -60,13 +59,14 @@ extends TableReducer<Writable, Mutation, Writable> {

 /**
 * Writes each given record, consisting of the row key and the given values,
- * to the configured {@link OutputFormat}. It is emitting the row key and each
+ * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}.
+ * It is emitting the row key and each
 * {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
+ * {@link org.apache.hadoop.hbase.client.Delete} as separate pairs.
 *
 * @param key The current row key.
 * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given
+ * {@link org.apache.hadoop.hbase.client.Delete} list for the given
 * row.
 * @param context The context of the reduce.
 * @throws IOException When writing the record fails.
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -120,7 +119,7 @@ public class TableRecordReaderImpl {
 /**
 * Sets the HBase table.
 *
- * @param htable The {@link HTable} to scan.
+ * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
 */
 public void setHTable(Table htable) {
 Configuration conf = htable.getConfiguration();
@@ -31,11 +31,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableSnapshotScanner;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -52,9 +49,9 @@ import com.google.common.annotations.VisibleForTesting;
 * hlogs, etc) directly to provide maximum performance. The snapshot is not required to be
 * restored to the live cluster or cloned. This also allows to run the mapreduce job from an
 * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to
- * run the mapreduce job directly over the snapshot files. The snapshot should not be deleted
- * while there are jobs reading from snapshot files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
+ * and this InputFormat can be used to run the mapreduce job directly over the snapshot files.
+ * The snapshot should not be deleted while there are jobs reading from snapshot files.
 * <p>
 * Usage is similar to TableInputFormat, and
 * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
@@ -71,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
 * <p>
 * Internally, this input format restores the snapshot into the given tmp directory. Similar to
 * {@link TableInputFormat} an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the {@link Scan} obtained
- * from the user.
+ * from each RecordReader. An internal RegionScanner is used to execute the
+ * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user.
 * <p>
 * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
 * snapshot files and data files.
@@ -82,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting;
 * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
 * Note that, given other users access to read from snapshot/data files will completely circumvent
 * the access control enforced by HBase.
- * @see TableSnapshotScanner
+ * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
 */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
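For reference, a minimal sketch (not part of this commit) of configuring a job over a snapshot with the initTableSnapshotMapperJob call named in the javadoc above. The snapshot name "mySnapshot" and the restore directory are made up for the example; IdentityTableMapper is used only as a placeholder mapper.

// Illustrative only: run a mapreduce job directly over snapshot files.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotJobSetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-snapshot");
    // Temp directory the snapshot is restored into; must be writable by the job user.
    Path restoreDir = new Path("/tmp/snapshot-restore");
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "mySnapshot", new Scan(), IdentityTableMapper.class,
        ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
  }
}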
@@ -17,7 +17,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.util.VersionInfo;

 import java.io.Closeable;
 import java.io.IOException;
-import java.net.Inet4Address;
 import java.net.Inet6Address;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -127,7 +127,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Watcher;
 import org.mortbay.jetty.Connector;
 import org.mortbay.jetty.nio.SelectChannelConnector;
 import org.mortbay.jetty.servlet.Context;
@@ -151,7 +150,7 @@ import com.google.protobuf.Service;
 *
 * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
 *
- * @see Watcher
+ * @see org.apache.zookeeper.Watcher
 */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
@@ -30,7 +30,6 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -17,7 +17,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;

 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ServerName;
@@ -44,11 +44,9 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.YouAreDeadException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
@@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
|
@ -74,8 +73,9 @@ import com.google.common.annotations.VisibleForTesting;
|
||||||
* <p>SplitLogManager monitors the tasks that it creates using the
|
* <p>SplitLogManager monitors the tasks that it creates using the
|
||||||
* timeoutMonitor thread. If a task's progress is slow then
|
* timeoutMonitor thread. If a task's progress is slow then
|
||||||
* {@link SplitLogManagerCoordination#checkTasks} will take away the
|
* {@link SplitLogManagerCoordination#checkTasks} will take away the
|
||||||
* task from the owner {@link SplitLogWorker} and the task will be up for grabs again. When the
|
* task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
|
||||||
* task is done then it is deleted by SplitLogManager.
|
* and the task will be up for grabs again. When the task is done then it is deleted
|
||||||
|
* by SplitLogManager.
|
||||||
*
|
*
|
||||||
* <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
|
* <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
|
||||||
* log files. The caller thread waits in this method until all the log files
|
* log files. The caller thread waits in this method until all the log files
|
||||||
|
|
|
@@ -22,7 +22,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.master.HMaster;

 import java.io.IOException;
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RackManager;
@@ -59,8 +58,9 @@ import com.google.common.collect.Sets;

 /**
 * The base class for load balancers. It provides the the functions used to by
- * {@link AssignmentManager} to assign regions in the edge cases. It doesn't
- * provide an implementation of the actual balancing algorithm.
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
+ * in the edge cases. It doesn't provide an implementation of the actual
+ * balancing algorithm.
 *
 */
 public abstract class BaseLoadBalancer implements LoadBalancer {
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.ServerManager;
@@ -42,16 +41,16 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position;
 import org.apache.hadoop.hbase.util.Pair;

 /**
- * An implementation of the {@link LoadBalancer} that assigns favored nodes for
- * each region. There is a Primary RegionServer that hosts the region, and then
- * there is Secondary and Tertiary RegionServers. Currently, the favored nodes
- * information is used in creating HDFS files - the Primary RegionServer passes
- * the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
- * API for creating files on the filesystem. These nodes are treated as hints by
- * the HDFS to place the blocks of the file. This alleviates the problem to do with
- * reading from remote nodes (since we can make the Secondary RegionServer as the new
- * Primary RegionServer) after a region is recovered. This should help provide consistent
- * read latencies for the regions even when their primary region servers die.
+ * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer}
+ * that assigns favored nodes for each region. There is a Primary RegionServer
+ * that hosts the region, and then there is Secondary and Tertiary RegionServers.
+ * Currently, the favored nodes information is used in creating HDFS files - the Primary
+ * RegionServer passes the primary, secondary, tertiary node addresses as hints to the
+ * DistributedFileSystem API for creating files on the filesystem. These nodes are treated
+ * as hints by the HDFS to place the blocks of the file. This alleviates the problem to
+ * do with reading from remote nodes (since we can make the Secondary RegionServer as the
+ * new Primary RegionServer) after a region is recovered. This should help provide
+ * consistent read latencies for the regions even when their primary region servers die.
 *
 */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue