HBASE-4670 Fix javadoc warnings
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1188537 13f79535-47bb-0310-9956-ffa450edef68
parent 0b4c5efe75, commit 3c764b1e05
@@ -400,6 +400,7 @@ Release 0.92.0 - Unreleased
master (Eugene Koontz via apurtell)
HBASE-3512 Shell support for listing currently loaded coprocessors (Eugene
Koontz via apurtell)
HBASE-4670 Fix javadoc warnings

TESTS
HBASE-4450 test for number of blocks read: to serve as baseline for expected
@@ -563,7 +563,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
}

/**
* @param keepDeletedRows True if deleted rows should not be collected
* @param keepDeletedCells True if deleted rows should not be collected
* immediately.
*/
public void setKeepDeletedCells(boolean keepDeletedCells) {
@@ -437,13 +437,13 @@ public final class HConstants {
public static int DEFAULT_HBASE_META_SCANNER_CACHING = 100;

/**
* Parameter name for unique identifier for this {@link Configuration}
* instance. If there are two or more {@link Configuration} instances that,
* Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
* instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
* for all intents and purposes, are the same except for their instance ids,
* then they will not be able to share the same {@link Connection} instance.
* then they will not be able to share the same {@link org.apache.hadoop.hbase.client.HConnection} instance.
* On the other hand, even if the instance ids are the same, it could result
* in non-shared {@link Connection} instances if some of the other connection
* parameters differ.
* in non-shared {@link org.apache.hadoop.hbase.client.HConnection}
* instances if some of the other connection parameters differ.
*/
public static String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
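For context on the recurring fix above: the javadoc tool warns when a {@link} target is neither imported nor fully qualified, so the commit rewrites such references with package-qualified names (or plain text). A minimal hedged sketch of the pattern, using a hypothetical class that is not part of HBase:

  /**
   * Identifier keyed off a {@link org.example.conf.ExampleConfig} instance.
   * Fully qualified because ExampleConfig is not imported in this file; a bare
   * {@link ExampleConfig} reference would produce a "reference not found" warning.
   */
  public static final String EXAMPLE_INSTANCE_ID = "example.instance.id";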
@@ -90,7 +90,7 @@ public class HServerAddress implements WritableComparable<HServerAddress> {
}

/** @return Bind address -- the raw IP, the result of a call to
* {@link InetSocketAddress#getAddress()#getHostAddress()} --
* InetSocketAddress#getAddress()#getHostAddress() --
* or null if cannot resolve */
public String getBindAddress() {
return getBindAddressInternal(address);
@@ -104,8 +104,6 @@ implements WritableComparable<HServerInfo> {

/**
* @return ServerName and load concatenated.
* @see #getServerName()
* @see #getLoad()
*/
@Override
public synchronized String toString() {
@@ -126,8 +126,6 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* <em>INTERNAL</em> Used by HBase Shell interface to access this metadata
* attribute which denotes if the deferred log flush option is enabled
*
* @see #getDeferredLogFlush()
*/
public static final String DEFERRED_LOG_FLUSH = "DEFERRED_LOG_FLUSH";
private static final ImmutableBytesWritable DEFERRED_LOG_FLUSH_KEY =

@@ -524,7 +522,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* before these deferred edits in memory are flushed onto the filesystem.
* </p>
*
* @param true if that deferred log flush is enabled on the table.
* @param isDeferredLogFlush
*/
public void setDeferredLogFlush(final boolean isDeferredLogFlush) {
setValue(DEFERRED_LOG_FLUSH_KEY, isDeferredLogFlush? TRUE: FALSE);

@@ -552,8 +550,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* This get the class associated with the region split policy which
* determines when a region split should occur. The class used by
* default is {@link ConstantSizeRegionSplitPolicy} which split the
* region base on a constant {@link #getMaxFileSize()}
* default is {@link org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy}
* which split the region base on a constant {@link #getMaxFileSize()}
*
* @return the class name of the region split policy for this table.
* If this returns null, the default constant size based split policy

@@ -877,7 +875,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
}

/**
* Returns an unmodifiable collection of all the {@link HColumnDescriptors}
* Returns an unmodifiable collection of all the {@link HColumnDescriptor}
* of all the column families of the table.
*
* @return Immutable collection of {@link HColumnDescriptor} of all the
@@ -1847,7 +1847,6 @@ public class KeyValue implements Writable, HeapSize {
* (the value part of the returned KV is always empty). Used in creating
* "fake keys" for the multi-column Bloom filter optimization to skip the
* row/column we already know is not in the file.
* @param kv the key-value pair to take row and column from
* @return the last key on the row/column of the given key-value pair
*/
public KeyValue createLastOnRowCol() {
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* usually timestamp of server startup). The {@link #toString()} format of
* ServerName is safe to use in the filesystem and as znode name up in
* ZooKeeper. Its format is:
* <code><hostname> '{@link #SERVERNAME_SEPARATOR"}' <port> '{@ink #SERVERNAME_SEPARATOR"}' <startcode></code>.
* <code><hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode></code>.
* For example, if hostname is <code>example.org</code>, port is <code>1234</code>,
* and the startcode for the regionserver is <code>1212121212</code>, then
* the {@link #toString()} would be <code>example.org,1234,1212121212</code>.

@@ -220,7 +220,7 @@ public class ServerName implements Comparable<ServerName> {

/**
* @param left
* @param rigth
* @param right
* @return True if <code>other</code> has same hostname and port.
*/
public static boolean isSameHostnameAndPort(final ServerName left,
@@ -49,8 +49,6 @@ public interface TableDescriptors {
/**
* Get Map of all HTableDescriptors. Populates the descriptor cache as a
* side effect.
* @param fs
* @param rootdir
* @return Map of all descriptors.
* @throws IOException
*/
@@ -145,7 +145,7 @@ public class CatalogTracker {
* Does not timeout.
* @param zk If zk is null, we'll create an instance (and shut it down
* when {@link #stop()} is called) else we'll use what is passed.
* @param connection server connection
* @param conf
* @param abortable If fatal exception we'll call abort on this. May be null.
* If it is we'll use the Connection associated with the passed
* {@link Configuration} as our Abortable.
@@ -50,7 +50,7 @@ public class MetaMigrationRemovingHTD {
/**
* Update legacy META rows, removing HTD from HRI.
* @param masterServices
* @return
* @return List of table descriptors.
* @throws IOException
*/
public static List<HTableDescriptor> updateMetaWithNewRegionInfo(

@@ -78,7 +78,7 @@ public class MetaMigrationRemovingHTD {
/**
* Update the ROOT with new HRI. (HRI with no HTD)
* @param masterServices
* @return
* @return List of table descriptors
* @throws IOException
*/
public static List<HTableDescriptor> updateRootWithNewRegionInfo(
@@ -251,7 +251,7 @@ public class MetaReader {
* @param metaServer connection to server hosting ROOT
* @return location of META in ROOT where location, or null if not available
* @throws IOException
* @deprecated Does not retry; use {@link #readRegionLocation(CatalogTracker, byte[])
* @deprecated Does not retry; use {@link #readRegionLocation(CatalogTracker, byte[])}
*/
public static ServerName readMetaLocation(HRegionInterface metaServer)
throws IOException {

@@ -589,7 +589,7 @@ public class MetaReader {

/**
* @param catalogTracker
* @param hsi Server specification
* @param serverName
* @return List of user regions installed on this server (does not include
* catalog regions).
* @throws IOException
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.Writable;
* <p>
* To append to a set of columns of a row, instantiate an Append object with the
* row to append to. At least one column to append must be specified using the
* {@link #add(byte[], byte[], long)} method.
* {@link #add(byte[], byte[], byte[])} method.
*/
public class Append extends Mutation implements Writable {
// TODO: refactor to derive from Put?
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.client;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;

@@ -192,7 +191,7 @@ public interface HConnection extends Abortable, Closeable {
* @param regionServer - the server to connect to
* @return proxy for HRegionServer
* @throws IOException if a remote or network exception occurs
* @deprecated Use {@link #getHRegionConnection(InetSocketAddress)}
* @deprecated Use {@link #getHRegionConnection(String, int)}
*/
public HRegionInterface getHRegionConnection(HServerAddress regionServer)
throws IOException;
@@ -208,10 +208,7 @@ public class HConnectionManager {
* This will then close connection to
* the zookeeper ensemble and let go of all resources.
*
* @param conf
* configuration whose identity is used to find {@link HConnection}
* instance.
* .
* @param connection
*/
public static void deleteStaleConnection(HConnection connection) {
deleteConnection(connection, true, true);

@@ -302,7 +299,7 @@ public class HConnectionManager {
* of a {@link HConnection} instance based on the given {@link Configuration}.
*
* <p>
* If you find yourself wanting to use a {@link Connection} for a relatively
* If you find yourself wanting to use a {@link HConnection} for a relatively
* short duration of time, and do not want to deal with the hassle of creating
* and cleaning up that resource, then you should consider using this
* convenience class.
@@ -294,7 +294,7 @@ public interface HTableInterface {
/**
* Atomically increments a column value.
* <p>
* Equivalent to {@code {@link #incrementColumnValue(byte[], byte[], byte[],
* Equivalent to {@link #incrementColumnValue(byte[], byte[], byte[],
* long, boolean) incrementColumnValue}(row, family, qualifier, amount,
* <b>true</b>)}
* @param row The row that contains the cell to increment.

@@ -392,7 +392,7 @@ public interface HTableInterface {
*
* @param protocol The class or interface defining the remote protocol
* @param row The row key used to identify the remote region location
* @return
* @return A CoprocessorProtocol instance
*/
<T extends CoprocessorProtocol> T coprocessorProxy(Class<T> protocol, byte[] row);

@@ -417,7 +417,7 @@ public interface HTableInterface {
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
* method
* @return a <code>Map</code> of region names to
* {@link Batch.Call#call(Object)} return values
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} return values
*/
<T extends CoprocessorProtocol, R> Map<byte[],R> coprocessorExec(
Class<T> protocol, byte[] startKey, byte[] endKey, Batch.Call<T,R> callable)

@@ -436,7 +436,7 @@ public interface HTableInterface {
*
* <p>
* For each result, the given
* {@link Batch.Callback#update(byte[], byte[], Object)}
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
* method will be called.
*</p>
*

@@ -447,7 +447,7 @@ public interface HTableInterface {
* @param callable wraps the CoprocessorProtocol implementation method calls
* made per-region
* @param callback an instance upon which
* {@link Batch.Callback#update(byte[], byte[], Object)} with the
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)} with the
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
* return value for each region
* @param <T> CoprocessorProtocol subclass for the remote invocation
@@ -106,8 +106,6 @@ public class HTablePool implements Closeable {
* configuration
* @param maxSize
* maximum number of references to keep for each table
* @param tableFactory
* table factory
* @param poolType
* pool type which is one of {@link PoolType#Reusable} or
* {@link PoolType#ThreadLocal}
@@ -131,7 +131,6 @@ public abstract class Mutation extends OperationWithAttributes {

/**
* Method for setting the put's familyMap
* @return familyMap
*/
public void setFamilyMap(Map<byte [], List<KeyValue>> map) {
this.familyMap = map;
@@ -76,11 +76,6 @@ extends RetriesExhaustedException {
return actions.get(i);
}

/**
* @param i
* @return
* @deprecated
*/
public HServerAddress getAddress(int i) {
return new HServerAddress(Addressing.createInetSocketAddressFromHostAndPortStr(getHostnamePort(i)));
}
@@ -170,7 +170,7 @@ public class AggregationClient {
* @param tableName
* @param ci
* @param scan
* @return
* @return <R, S>
* @throws Throwable
*/
public <R, S> long rowCount(final byte[] tableName,

@@ -282,7 +282,7 @@ public class AggregationClient {
* @param tableName
* @param ci
* @param scan
* @return
* @return <R, S>
* @throws Throwable
*/
public <R, S> double avg(final byte[] tableName,

@@ -348,7 +348,7 @@ public class AggregationClient {
* @param tableName
* @param ci
* @param scan
* @return
* @return <R, S>
* @throws Throwable
*/
public <R, S> double std(final byte[] tableName, ColumnInterpreter<R, S> ci,
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* and its promoted data type is also a Long value. For computing aggregation
* function, this class is used to find the datatype of the cell value. Client
* is supposed to instantiate it and passed along as a parameter. See
* {@link TestAggregateProtocol} methods for its sample usage.
* TestAggregateProtocol methods for its sample usage.
* Its methods handle null arguments gracefully.
*/
public class LongColumnInterpreter implements ColumnInterpreter<Long, Long> {
@@ -80,11 +80,10 @@ public interface AggregateProtocol extends CoprocessorProtocol {
<T, S> S getSum(ColumnInterpreter<T, S> ci, Scan scan) throws IOException;

/**
* Gives the row count for the given column family and column qualifier, in
* the given row range as defined in the Scan object.
* @param ci
* @param scan
* @return
* @return Row count for the given column family and column qualifier, in
* the given row range as defined in the Scan object.
* @throws IOException
*/
<T, S> long getRowNum(ColumnInterpreter<T, S> ci, Scan scan)

@@ -104,7 +103,7 @@ public interface AggregateProtocol extends CoprocessorProtocol {
* type.
* @param ci
* @param scan
* @return
* @return Average
* @throws IOException
*/
<T, S> Pair<S, Long> getAvg(ColumnInterpreter<T, S> ci, Scan scan)

@@ -120,7 +119,7 @@ public interface AggregateProtocol extends CoprocessorProtocol {
* deviation is square root of variance.
* @param ci
* @param scan
* @return
* @return STD
* @throws IOException
*/
<T, S> Pair<List<S>, Long> getStd(ColumnInterpreter<T, S> ci, Scan scan)
@@ -42,14 +42,15 @@ import org.apache.hadoop.io.Writable;
* <S>. There is a conversion method
* {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and
* returns a <S> type.
* @param <T, S>: T - cell value data type, S - promoted data type
* @param <T> Cell value data type
* @param <S> Promoted data type
*/
public interface ColumnInterpreter<T, S> extends Writable {

/**
* @param colFamily
* @param colQualifier
* @param value
* @param kv
* @return value of type T
* @throws IOException
*/

@@ -57,44 +58,39 @@ public interface ColumnInterpreter<T, S> extends Writable {
throws IOException;

/**
* returns sum or non null value among (if either of them is null); otherwise
* returns a null.
* @param l1
* @param l2
* @return
* @return sum or non null value among (if either of them is null); otherwise
* returns a null.
*/
public S add(S l1, S l2);

/**
* returns the maximum value for this type T
* @return
* @return max
*/

T getMaxValue();

/**
* @return
*/

T getMinValue();

/**
* @param o1
* @param o2
* @return
* @return multiplication
*/
S multiply(S o1, S o2);

/**
* @param o
* @return
* @return increment
*/
S increment(S o);

/**
* provides casting opportunity between the data types.
* @param o
* @return
* @return cast
*/
S castToReturnType(T o);

@@ -109,10 +105,10 @@ public interface ColumnInterpreter<T, S> extends Writable {

/**
* used for computing average of <S> data values. Not providing the divide
* method that takes two <S> values as it si not needed as of now.
* method that takes two <S> values as it is not needed as of now.
* @param o
* @param l
* @return
* @return Average
*/
double divideForAvg(S o, Long l);
}
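The ColumnInterpreter hunks above show the other recurring fix in this commit: each generic type parameter gets its own @param <X> tag and every @return tag gets descriptive text. A small hedged sketch of that style on a made-up interface (not HBase code):

  /**
   * Combines two partial aggregation results.
   * @param <T> cell value data type
   * @param <S> promoted data type
   */
  public interface ExampleInterpreter<T, S> {
    /**
     * @param a first value, may be null
     * @param b second value, may be null
     * @return the sum, the non-null argument if only one is null, or null if both are null
     */
    S add(S a, S b);
  }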
@@ -220,7 +220,7 @@ public interface MasterObserver extends Coprocessor {
/**
* Called prior to unassigning a given region.
* @param ctx the environment to interact with the framework and master
* @param regionName the name of the region
* @param regionInfo
* @param force whether to force unassignment or not
*/
void preUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,

@@ -229,7 +229,7 @@ public interface MasterObserver extends Coprocessor {
/**
* Called after the region unassignment has been requested.
* @param ctx the environment to interact with the framework and master
* @param regionName the name of the region
* @param regionInfo
* @param force whether to force unassignment or not
*/
void postUnassign(final ObserverContext<MasterCoprocessorEnvironment> ctx,
@@ -154,7 +154,7 @@ import org.apache.hadoop.hbase.client.Get;
// and intercept preXXX() method to check user privilege for the given table
// and column family.
public class AccessControlCoprocessor extends BaseRegionObserverCoprocessor {
@Override
// @Override
public Get preGet(CoprocessorEnvironment e, Get get)
throws CoprocessorException {

@@ -208,9 +208,9 @@ extends CoprocessorProtocol {
// Aggregation implementation at a region.
public static class ColumnAggregationEndpoint extends BaseEndpointCoprocessor
implements ColumnAggregationProtocol {
@Override
// Scan the region by the given family and qualifier. Return the aggregation
// result.
// @Override
// Scan the region by the given family and qualifier. Return the aggregation
// result.
public int sum(byte[] family, byte[] qualifier)
throws IOException {
// aggregate at each region
@@ -108,7 +108,7 @@ public class RegionTransitionData implements Writable {
*
* @param eventType type of event
* @param regionName name of region as per <code>HRegionInfo#getRegionName()</code>
* @param origin Originating {@link ServerName}
* @param serverName Originating {@link ServerName}
* @param payload Payload examples include the daughters involved in a
* {@link EventType#RS_ZK_REGION_SPLIT}. Can be null
*/
@@ -47,7 +47,7 @@ public class BitComparator extends WritableByteArrayComparable {
/**
* Constructor
* @param value value
* @param BitwiseOp bitOperator - the operator to use on the bit comparison
* @param bitOperator operator to use on the bit comparison
*/
public BitComparator(byte[] value, BitwiseOp bitOperator) {
super(value);
@@ -19,26 +19,19 @@
*/
package org.apache.hadoop.hbase.filter;

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.TreeSet;
import java.util.ArrayList;
import java.util.Stack;
import java.util.EmptyStackException;
import java.util.HashMap;
import java.util.Set;
import java.util.Stack;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.ParseConstants;

import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import java.lang.ArrayIndexOutOfBoundsException;
import java.lang.ClassCastException;
import java.lang.reflect.*;
import java.util.EmptyStackException;
import org.apache.hadoop.hbase.util.Bytes;

/**
* This class allows a user to specify a filter via a string

@@ -289,7 +282,7 @@ public class ParseFilter {
/**
* Returns the arguments of the filter from the filter string
* <p>
* @param filter_string filter string given by the user
* @param filterStringAsByteArray filter string given by the user
* @return an ArrayList containing the arguments of the filter in the filter string
*/
public static ArrayList<byte []> getFilterArguments (byte [] filterStringAsByteArray) {

@@ -361,7 +354,6 @@ public class ParseFilter {
* @param operatorStack the stack containing the operators and parenthesis
* @param filterStack the stack containing the filters
* @param operator the operator found while parsing the filterString
* @return returns the filterStack after evaluating the stack
*/
public void reduce(Stack<ByteBuffer> operatorStack,
Stack<Filter> filterStack,

@@ -646,7 +638,7 @@ public class ParseFilter {
* byte array representing abc
* <p>
* @param quotedByteArray the quoted byte array
* @return
* @return Unquoted byte array
*/
public static byte [] removeQuotesFromByteArray (byte [] quotedByteArray) {
if (quotedByteArray == null ||

@@ -665,7 +657,7 @@ public class ParseFilter {
* Converts an int expressed in a byte array to an actual int
* <p>
* This doesn't use Bytes.toInt because that assumes
* that there will be {@link #SIZEOF_INT} bytes available.
* that there will be {@link Bytes#SIZEOF_INT} bytes available.
* <p>
* @param numberAsByteArray the int value expressed as a byte array
* @return the int value

@@ -688,7 +680,7 @@ public class ParseFilter {
* Converts a long expressed in a byte array to an actual long
* <p>
* This doesn't use Bytes.toLong because that assumes
* that there will be {@link #SIZEOF_LONG} bytes available.
* that there will be {@link Bytes#SIZEOF_INT} bytes available.
* <p>
* @param numberAsByteArray the long value expressed as a byte array
* @return the long value
@@ -20,11 +20,11 @@

package org.apache.hadoop.hbase.filter;

import org.apache.hadoop.hbase.KeyValue;

import java.util.List;
import java.util.ArrayList;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;

/**
* This filter is used to filter based on the key. It takes an operator
* (equal, greater, not equal, etc) and a byte [] comparator for the row,
@@ -55,7 +55,7 @@ public class HalfStoreFileReader extends StoreFile.Reader {
/**
* @param fs
* @param p
* @param c
* @param cacheConf
* @param r
* @throws IOException
*/
@@ -225,7 +225,7 @@ public abstract class AbstractHFileReader implements HFile.Reader {
}

/**
* TODO left from {@HFile} version 1: move this to StoreFile after Ryan's
* TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
* patch goes in to eliminate {@link KeyValue} here.
*
* @return the first row key, or null if the file is empty.

@@ -239,7 +239,7 @@ public abstract class AbstractHFileReader implements HFile.Reader {
}

/**
* TODO left from {@HFile} version 1: move this to StoreFile after
* TODO left from {@link HFile} version 1: move this to StoreFile after
* Ryan's patch goes in to eliminate {@link KeyValue} here.
*
* @return the last row key, or null if the file is empty.
@@ -74,7 +74,7 @@ public interface BlockCache {

/**
* Get the statistics for this block cache.
* @return
* @return Stats
*/
public CacheStats getStats();
@@ -246,18 +246,14 @@ public class HFile {
}

/**
* Returns the factory to be used to create {@link HFile} writers. Should
* always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
* can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
* Returns the factory to be used to create {@link HFile} writers.
*/
public static final WriterFactory getWriterFactory(Configuration conf) {
return HFile.getWriterFactory(conf, new CacheConfig(conf));
}

/**
* Returns the factory to be used to create {@link HFile} writers. Should
* always be {@link HFileWriterV2#WRITER_FACTORY_V2} in production, but
* can also be {@link HFileWriterV1#WRITER_FACTORY_V1} in testing.
* Returns the factory to be used to create {@link HFile} writers
*/
public static final WriterFactory getWriterFactory(Configuration conf,
CacheConfig cacheConf) {
@@ -495,10 +495,10 @@ public class HFileBlock implements Cacheable, HFileBlockInfo {
* <ul>
* <li>Construct an {@link HFileBlock.Writer}, providing a compression
* algorithm
* <li>Call {@link Writer#startWriting(BlockType)} and get a data stream to
* <li>Call {@link Writer#startWriting(BlockType, boolean)} and get a data stream to
* write to
* <li>Write your data into the stream
* <li>Call {@link Writer#writeHeaderAndData()} as many times as you need to
* <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need to
* store the serialized block into an external stream, or call
* {@link Writer#getHeaderAndData()} to get it as a byte array.
* <li>Repeat to write more blocks

@@ -586,8 +586,6 @@ public class HFileBlock implements Cacheable, HFileBlockInfo {
private long prevOffset;

/**
* @param blockType
* block type to create
* @param compressionAlgorithm
* compression algorithm to use
*/

@@ -717,7 +715,7 @@ public class HFileBlock implements Cacheable, HFileBlockInfo {
}

/**
* Similar to {@link #writeHeaderAndData(DataOutputStream)}, but records
* Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records
* the offset of this block so that it can be referenced in the next block
* of the same type.
*

@@ -864,7 +862,7 @@ public class HFileBlock implements Cacheable, HFileBlockInfo {
}

/**
* Similar to {@link #getUncompressedDataWithHeader()} but returns a byte
* Similar to {@link #getUncompressedBufferWithHeader()} but returns a byte
* buffer.
*
* @return uncompressed block for caching on write in the form of a buffer

@@ -1084,20 +1082,15 @@ public class HFileBlock implements Cacheable, HFileBlockInfo {
/**
* Decompresses data from the given stream using the configured compression
* algorithm.
*
* @param boundedStream
* @param dest
* @param destOffset
* @param bufferedBoundedStream
* a stream to read compressed data from, bounded to the exact
* amount of compressed data
* @param compressedSize
* compressed data size, header not included
* @param uncompressedSize
* uncompressed data size, header not included
* @param header
* the header to include before the decompressed data, or null.
* Only the first {@link HFileBlock#HEADER_SIZE} bytes of the
* buffer are included.
* @return the byte buffer containing the given header (optionally) and the
* decompressed data
* @throws IOException
*/
protected void decompress(byte[] dest, int destOffset,
@@ -52,7 +52,7 @@ import org.apache.hadoop.io.WritableUtils;
* Examples of how to use the block index writer can be found in
* {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
* to use the reader can be found in {@link HFileReaderV2} and
* {@link TestHFileBlockIndex}.
* TestHFileBlockIndex.
*/
public class HFileBlockIndex {

@@ -62,7 +62,7 @@ public class HFileBlockIndex {

/**
* The maximum size guideline for index blocks (both leaf, intermediate, and
* root). If not specified, {@link #DEFAULT_MAX_CHUNK_SIZE} is used.
* root). If not specified, <code>DEFAULT_MAX_CHUNK_SIZE</code> is used.
*/
public static final String MAX_CHUNK_SIZE_KEY = "hfile.index.block.max.size";

@@ -913,8 +913,6 @@ public class HFileBlockIndex {
* blocks, so the non-root index format is used.
*
* @param out
* @param position The beginning offset of the inline block in the file not
* include the header.
*/
@Override
public void writeInlineBlock(DataOutput out) throws IOException {
@@ -56,7 +56,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
* stream.
* @param size Length of the stream.
* @param cacheConf cache references and configuration
* @throws IOException
*/
public HFileReaderV1(Path path, FixedFileTrailer trailer,
final FSDataInputStream fsdis, final long size,

@@ -81,7 +80,7 @@ public class HFileReaderV1 extends AbstractHFileReader {
* Read in the index and file info.
*
* @return A map of fileinfo data.
* @see {@link Writer#appendFileInfo(byte[], byte[])}.
* @see Writer#appendFileInfo(byte[], byte[])
* @throws IOException
*/
@Override
@@ -213,7 +213,8 @@ public class HFileReaderV2 extends AbstractHFileReader {
* Read in a file block.
*
* @param dataBlockOffset offset to read.
* @param onDiskSize size of the block
* @param onDiskBlockSize size of the block
* @param cacheBlock
* @param pread Use positional read instead of seek+read (positional is better
* doing random reads whereas seek+read is better scanning).
* @param isCompaction is this block being read as part of a compaction
@@ -56,7 +56,6 @@ public interface InlineBlockWriter {
* @param offset the offset of the block in the stream
* @param onDiskSize the on-disk size of the block
* @param uncompressedSize the uncompressed size of the block
* @param rawSize
*/
void blockWritten(long offset, int onDiskSize, int uncompressedSize);

@@ -69,5 +68,4 @@ public interface InlineBlockWriter {
* @return true if inline blocks produced by this writer should be cached
*/
boolean cacheOnWrite();

}
@@ -103,7 +103,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* The second list is blocksize of the slabs in bytes. (E.g. the slab holds
* blocks of this size).
*
* @param Configuration file.
* @param conf Configuration file.
*/
public void addSlabByConf(Configuration conf) {
// Proportions we allocate to each slab of the total size.

@@ -229,7 +229,8 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
/**
* Get the buffer of the block with the specified name.
*
* @param blockName block name
* @param key
* @param caching
* @return buffer of specified block name, or null if not in cache
*/
public Cacheable getBlock(String key, boolean caching) {
@@ -48,7 +48,7 @@ public interface Delayable {
/**
* Signal that the RPC server is now allowed to send the response.
* @param result The value to return to the caller. If the corresponding
* {@link #delayResponse(boolean)} specified that the return value should
* delay response specified that the return value should
* not be delayed, this parameter must be null.
* @throws IOException
*/
@@ -134,9 +134,9 @@ public abstract class HBaseServer implements RpcServer {
}

/** Returns the server instance called under or null. May be called under
* {@link #call(Class, Writable, long)} implementations, and under {@link Writable}
* methods of paramters and return values. Permits applications to access
* the server context.
* {@link #call(Class, Writable, long, MonitoredRPCHandler)} implementations,
* and under {@link Writable} methods of paramters and return values.
* Permits applications to access the server context.
* @return HBaseServer
*/
public static RpcServer get() {

@@ -860,7 +860,6 @@ public abstract class HBaseServer implements RpcServer {
// Processes one response. Returns true if there are no more pending
// data for this channel.
//
@SuppressWarnings({"ConstantConditions"})
private boolean processResponse(final LinkedList<Call> responseQueue,
boolean inHandler) throws IOException {
boolean error = true;

@@ -1280,14 +1279,15 @@ public abstract class HBaseServer implements RpcServer {

}

private Function<Writable,Integer> qosFunction = null;

/**
* Gets the QOS level for this call. If it is higher than the highPriorityLevel and there
* are priorityHandlers available it will be processed in it's own thread set.
*
* @param param
* @return priority, higher is better
* @param newFunc
*/
private Function<Writable,Integer> qosFunction = null;
@Override
public void setQosFunction(Function<Writable, Integer> newFunc) {
qosFunction = newFunc;
@@ -159,7 +159,7 @@ public class InputSampler<K,V> extends Configured implements Tool {
* native Hadoop ones (We'll throw a ClassNotFoundException if end up in
* here when we should be using native hadoop TotalOrderPartitioner).
* @param job
* @return
* @return Context
* @throws IOException
*/
public static TaskAttemptContext getTaskAttemptContext(final Job job)
@@ -276,7 +276,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
* Main processing loop for the HMaster.
* <ol>
* <li>Block until becoming active master
* <li>Finish initialization via {@link #finishInitialization()}
* <li>Finish initialization via finishInitialization(MonitoredTask)
* <li>Enter loop until we are stopped
* <li>Stop services and perform cleanup once stopped
* </ol>

@@ -881,7 +881,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}

/**
* Switch for the background {@link CatalogJanitor} thread.
* Switch for the background CatalogJanitor thread.
* Used for testing. The thread will continue to run. It will just be a noop
* if disabled.
* @param b If false, the catalog janitor won't do anything.
@@ -61,7 +61,7 @@ public interface LoadBalancer extends Configurable {
/**
* Perform the major balance operation
* @param clusterState
* @return
* @return List of plans
*/
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState);

@@ -69,7 +69,7 @@ public interface LoadBalancer extends Configurable {
* Perform a Round Robin assignment of regions.
* @param regions
* @param servers
* @return
* @return Map of servername to regioninfos
*/
public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions, List<ServerName> servers);

@@ -77,7 +77,7 @@ public interface LoadBalancer extends Configurable {
* Assign regions to the previously hosting region server
* @param regions
* @param servers
* @return
* @return List of plans
*/
public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions, List<ServerName> servers);

@@ -85,14 +85,14 @@ public interface LoadBalancer extends Configurable {
* Sync assign a region
* @param regions
* @param servers
* @return
* @return Map regioninfos to servernames
*/
public Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions, List<ServerName> servers);

/**
* Get a random region server from the list
* @param servers
* @return
* @return Servername
*/
public ServerName randomAssignment(List<ServerName> servers);
}
@@ -31,7 +31,7 @@ public class LoadBalancerFactory {
/**
* Create a loadblanacer from the given conf.
* @param conf
* @return
* @return A {@link LoadBalancer}
*/
public static LoadBalancer getLoadBalancer(Configuration conf) {
@@ -164,15 +164,13 @@ public class MasterFileSystem {

/**
* @return HBase root dir.
* @throws IOException
*/
public Path getRootDir() {
return this.rootdir;
}

/**
* Returns the unique identifier generated for this cluster
* @return
* @return The unique identifier generated for this cluster
*/
public String getClusterId() {
return clusterId;
@@ -243,7 +243,7 @@ public class ServerManager {
}

/**
* @param serverName
* @param address
* @return HServerLoad if serverName is known else null
* @deprecated Use {@link #getLoad(HServerAddress)}
*/
@@ -68,8 +68,8 @@ import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.*
* znode is created under /hbase/splitlog. SplitLogWorkers race to grab a task.
*
* SplitLogManager monitors the task znodes that it creates using the
* {@link #timeoutMonitor} thread. If a task's progress is slow then
* {@link #resubmit(String, boolean)} will take away the task from the owner
* timeoutMonitor thread. If a task's progress is slow then
* resubmit(String, boolean) will take away the task from the owner
* {@link SplitLogWorker} and the task will be
* upforgrabs again. When the task is done then the task's znode is deleted by
* SplitLogManager.

@@ -122,8 +122,6 @@ public class SplitLogManager extends ZooKeeperListener {
* @param conf
* @param stopper
* @param serverName
* @param services
* @param service
*/
public SplitLogManager(ZooKeeperWatcher zkw, final Configuration conf,
Stoppable stopper, String serverName) {

@@ -205,7 +203,7 @@ public class SplitLogManager extends ZooKeeperListener {
* @throws IOException
* if there was an error while splitting any log file
* @return cumulative size of the logfiles split
* @throws KeeperException
* @throws IOException
*/
public long splitLogDistributed(final Path logDir) throws IOException {
List<Path> logDirs = new ArrayList<Path>();

@@ -218,8 +216,7 @@ public class SplitLogManager extends ZooKeeperListener {
* available worker region server. This method must only be called after the
* region servers have been brought online.
*
* @param logDir
* the log directory encoded with a region server name
* @param logDirs
* @throws IOException
* if there was an error while splitting any log file
* @return cumulative size of the logfiles split
@@ -56,7 +56,7 @@ public interface MonitoredTask extends Cloneable {
/**
* Public exposure of Object.clone() in order to allow clients to easily
* capture current state.
* @returns a copy of the object whose references will not change
* @return a copy of the object whose references will not change
*/
public abstract MonitoredTask clone();
@@ -48,8 +48,9 @@ public abstract class ThreadMonitoring {
/**
* Print all of the thread's information and stack traces.
*
* @param stream the stream to
*
* @param sb
* @param info
* @param indent
*/
public static void appendThreadInfo(StringBuilder sb,
ThreadInfo info,
@@ -346,7 +346,7 @@ public class HRegion implements HeapSize { // , Writable{
/**
* HRegion constructor. his constructor should only be used for testing and
* extensions. Instances of HRegion should be instantiated with the
* {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)} method.
* {@link HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)} method.
*
*
* @param tableDir qualified path of directory where region should be located,

@@ -364,7 +364,7 @@ public class HRegion implements HeapSize { // , Writable{
* is new), then read them from the supplied path.
* @param rsServices reference to {@link RegionServerServices} or null
*
* @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
* @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)
*/
public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
HRegionInfo regionInfo, final HTableDescriptor htd,

@@ -1161,7 +1161,7 @@ public class HRegion implements HeapSize { // , Writable{
* @param status
* @return true if the region needs compacting
* @throws IOException
* @see #internalFlushcache()
* @see #internalFlushcache(MonitoredTask)
*/
protected boolean internalFlushcache(
final HLog wal, final long myseqid, MonitoredTask status)

@@ -2585,7 +2585,7 @@ public class HRegion implements HeapSize { // , Writable{

/**
* Release the row lock!
* @param lockid The lock ID to release.
* @param lockId The lock ID to release.
*/
public void releaseRowLock(final Integer lockId) {
HashedBytes rowKey = lockIds.remove(lockId);

@@ -2995,13 +2995,14 @@ public class HRegion implements HeapSize { // , Writable{

/**
* Open a Region.
* @param info Info for region to be opened.
* @param info Info for region to be opened
* @param htd
* @param wal HLog for region to use. This method will call
* HLog#setSequenceNumber(long) passing the result of the call to
* HRegion#getMinSequenceId() to ensure the log id is properly kept
* up. HRegionStore does this every time it opens a new region.
* @param conf
* @param flusher An interface we can request flushes against.
* @param rsServices An interface we can request flushes against.
* @param reporter An interface we can report progress against.
* @return new HRegion
*

@@ -3565,7 +3566,6 @@ public class HRegion implements HeapSize { // , Writable{
*
* @param append
* @param lockid
* @param returnResult
* @param writeToWAL
* @return new keyvalues after increment
* @throws IOException
@@ -94,11 +94,10 @@ public interface KeyValueScanner {

/**
* Does the real seek operation in case it was skipped by
* {@link #seekToRowCol(KeyValue, boolean)}. Note that this function should
* seekToRowCol(KeyValue, boolean) (TODO: Whats this?). Note that this function should
* be never called on scanners that always do real seek operations (i.e. most
* of the scanners). The easiest way to achieve this is to call
* {@link #realSeekDone()} first.
*/
public void enforceSeek() throws IOException;

}
@@ -210,8 +210,8 @@ public class RegionCoprocessorHost
* {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost#postOpen()} are such hooks.
*
* See also {@link org.apache.hadoop.hbase.master.MasterCoprocessorHost#handleCoprocessorThrowable()}
* @param env: The coprocessor that threw the exception.
* @param e: The exception that was thrown.
* @param env The coprocessor that threw the exception.
* @param e The exception that was thrown.
*/
private void handleCoprocessorThrowableNoRethrow(
final CoprocessorEnvironment env, final Throwable e) {

@@ -566,7 +566,6 @@ public class RegionCoprocessorHost
/**
* @param get the Get request
* @param results the result set
* @return the possibly transformed result set to use
* @exception IOException Exception
*/
public void postGet(final Get get, final List<KeyValue> results)
@@ -548,8 +548,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
* acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
* guarantee that two workers will not be executing the same task therefore it
* is better to have workers prepare the task and then have the
* {@link SplitLogManager} commit the work in
* {@link SplitLogManager.TaskFinisher}
* {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
*/
static public interface TaskExecutor {
static public enum Status {
@@ -61,7 +61,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Executes region split as a "transaction". Call {@link #prepare()} to setup
* the transaction, {@link #execute(Server, RegionServerServices)} to run the
* transaction and {@link #rollback(OnlineRegions)} to cleanup if execute fails.
* transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails.
*
* <p>Here is an example of how you would use this class:
* <pre>

@@ -145,9 +145,6 @@ public class SplitTransaction {

/**
* Constructor
* @param services So we can online new regions. If null, we'll skip onlining
* (Useful testing).
* @param c Configuration to use running split
* @param r Region to split
* @param splitrow Row to split around
*/

@@ -430,8 +427,7 @@ public class SplitTransaction {
* @param services Used to online/offline regions.
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @return Regions created
* @throws KeeperException
* @throws NodeExistsException
* @throws IOException
* @see #rollback(Server, RegionServerServices)
*/
public PairOfSameType<HRegion> execute(final Server server,
@@ -70,7 +70,7 @@ import com.google.common.collect.Ordering;
/**
* A Store data file. Stores usually have one or more of these files. They
* are produced by flushing the memstore to disk. To
* create, call {@link #createWriter(FileSystem, Path, int, Configuration)}
* create, call {@link #createWriter(FileSystem, Path, int, Configuration, CacheConfig)}
* and append data. Be sure to add any metadata before calling close on the
* Writer (Use the appendMetadata convenience methods). On close, a StoreFile
* is sitting in the Filesystem. To refer to it, create a StoreFile instance

@@ -402,7 +402,7 @@ public class StoreFile {
* helper function to compute HDFS blocks distribution of a given file.
* For reference file, it is an estimate
* @param fs The FileSystem
* @param o The path of the file
* @param p The path of the file
* @return HDFS blocks distribution
*/
static public HDFSBlocksDistribution computeHDFSBlockDistribution(

@@ -531,7 +531,6 @@ public class StoreFile {

/**
* @return Current reader. Must call createReader first else returns null.
* @throws IOException
* @see #createReader()
*/
public Reader getReader() {

@@ -539,7 +538,7 @@ public class StoreFile {
}

/**
* @param b
* @param evictOnClose
* @throws IOException
*/
public synchronized void closeReader(boolean evictOnClose)

@@ -855,7 +854,6 @@ public class StoreFile {
* If the timeRangeTracker is not set,
* update TimeRangeTracker to include the timestamp of this key
* @param kv
* @throws IOException
*/
public void trackTimestamps(final KeyValue kv) {
if (KeyValue.Type.Put.getCode() == kv.getType()) {

@@ -1243,7 +1241,7 @@ public class StoreFile {

/**
* A method for checking Bloom filters. Called directly from
* {@link StoreFileScanner} in case of a multi-column query.
* StoreFileScanner in case of a multi-column query.
*
* @param row
* @param rowOffset

@@ -1251,7 +1249,7 @@ public class StoreFile {
* @param col
* @param colOffset
* @param colLen
* @return
* @return True if passes
*/
public boolean passesGeneralBloomFilter(byte[] row, int rowOffset,
int rowLen, byte[] col, int colOffset, int colLen) {
@@ -46,8 +46,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
* <p>
* This class has a number of metrics variables that are publicly accessible;
* these variables (objects) have methods to update their values;
* for example:
* <p> {@link #rpcQueueTime}.inc(time)
* for example: rpcQueueTime.inc(time)
*
*/
public class RegionServerDynamicMetrics implements Updater {
@@ -1441,7 +1441,7 @@ public class HLog implements Syncable {
*
* In this method, by removing the entry in lastSeqWritten for the region
* being flushed we ensure that the next edit inserted in this region will be
* correctly recorded in {@link #append(HRegionInfo, HLogKey, WALEdit)}. The
* correctly recorded in {@link #append(HRegionInfo, byte[], WALEdit, long, HTableDescriptor)} The
* lsn of the earliest in-memory lsn - which is now in the memstore snapshot -
* is saved temporarily in the lastSeqWritten map while the flush is active.
*

@@ -1449,7 +1449,7 @@ public class HLog implements Syncable {
* {@link #completeCacheFlush(byte[], byte[], long, boolean)} (byte[],
* byte[], long)}
* @see #completeCacheFlush(byte[], byte[], long, boolean)
* @see #abortCacheFlush()
* @see #abortCacheFlush(byte[])
*/
public long startCacheFlush(final byte[] encodedRegionName) {
this.cacheFlushLock.lock();

@@ -1753,29 +1753,6 @@ public class HLog implements Syncable {
System.err.println(" For example: HLog --split hdfs://example.com:9000/hbase/.logs/DIR");
}

private static void dump(final Configuration conf, final Path p)
throws IOException {
FileSystem fs = FileSystem.get(conf);
if (!fs.exists(p)) {
throw new FileNotFoundException(p.toString());
}
if (!fs.isFile(p)) {
throw new IOException(p + " is not a file");
}
Reader log = getReader(fs, p, conf);
try {
int count = 0;
HLog.Entry entry;
while ((entry = log.next()) != null) {
System.out.println("#" + count + ", pos=" + log.getPosition() + " " +
entry.toString());
count++;
}
} finally {
log.close();
}
}

private static void split(final Configuration conf, final Path p)
throws IOException {
FileSystem fs = FileSystem.get(conf);
@@ -71,7 +71,7 @@ public class HLogKey implements WritableComparable<HLogKey> {
* @param tablename - name of table
* @param logSeqNum - log sequence number
* @param now Time at which this edit was written.
* @param UUID of the cluster (used in Replication)
* @param clusterId of the cluster (used in Replication)
*/
public HLogKey(final byte [] encodedRegionName, final byte [] tablename,
long logSeqNum, final long now, UUID clusterId) {
@@ -37,8 +37,7 @@ import java.nio.ByteBuffer;
* When creating the filter, the sender can choose its desired point in a
* trade-off between the false positive rate and the size.
*
* @see {@link BloomFilterWriter} for the ability to add elements to a Bloom
* filter
* @see BloomFilterWriter for the ability to add elements to a Bloom filter
*/
public interface BloomFilter extends BloomFilterBase {

@@ -59,5 +58,4 @@ public interface BloomFilter extends BloomFilterBase {
* and thus allows a null byte buffer to be passed to contains()
*/
boolean supportsAutoLoading();

}

@@ -161,11 +161,11 @@ public final class BloomFilterFactory {
* {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
*
* @param conf
* @param cacheConf
* @param bloomType
* @param maxKeys an estimate of the number of keys we expect to insert.
* Irrelevant if compound Bloom filters are enabled.
* @param writer the HFile writer
* @param bloomErrorRate
* @return the new Bloom filter, or null in case Bloom filters are disabled
* or when failed to create one.
*/

@@ -231,10 +231,10 @@ public final class BloomFilterFactory {
* Creates a new Delete Family Bloom filter at the time of
* {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
* @param conf
* @param cacheConf
* @param maxKeys an estimate of the number of keys we expect to insert.
* Irrelevant if compound Bloom filters are enabled.
* @param writer the HFile writer
* @param bloomErrorRate
* @return the new Bloom filter, or null in case Bloom filters are disabled
* or when failed to create one.
*/

@@ -164,8 +164,7 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter {
* @param bitSize
* @param errorRate
* @return maximum number of keys that can be inserted into the Bloom filter
* @see {@link #computeMaxKeys(long, double, int)} for a more precise
* estimate
* @see #computeMaxKeys(long, double, int) for a more precise estimate
*/
public static long idealMaxKeys(long bitSize, double errorRate) {
  // The reason we need to use floor here is that otherwise we might put

@@ -227,7 +226,7 @@ public class ByteBloomFilter implements BloomFilter, BloomFilterWriter {
*
* @param bitSize
* @param foldFactor
* @return
* @return Foldable byte size
*/
public static int computeFoldableByteSize(long bitSize, int foldFactor) {
  long byteSizeLong = (bitSize + 7) / 8;
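
Both hunks above document static sizing helpers on ByteBloomFilter. A small hedged sketch of how they might be combined when reasoning about filter size; the bit size, error rate and fold factor are arbitrary example values.

    // Sketch only: uses the two static helpers whose javadoc is fixed above.
    import org.apache.hadoop.hbase.util.ByteBloomFilter;

    public class BloomSizingSketch {
      public static void main(String[] args) {
        long bitSize = 8L * 128 * 1024;   // a 128 KB bit vector, for illustration
        double errorRate = 0.01;          // 1% target false-positive rate
        int foldFactor = 7;               // example folding limit

        // Upper bound on keys the filter can hold at the target error rate.
        long maxKeys = ByteBloomFilter.idealMaxKeys(bitSize, errorRate);

        // Byte size rounded so the bit vector can be folded foldFactor times.
        int foldableBytes = ByteBloomFilter.computeFoldableByteSize(bitSize, foldFactor);

        System.out.println("maxKeys=" + maxKeys + ", foldableBytes=" + foldableBytes);
      }
    }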

@@ -759,7 +759,7 @@ public class Bytes {
* This method will get a sequence of bytes from pos -> limit,
* but will restore pos after.
* @param buf
* @return
* @return byte array
*/
public static byte[] getBytes(ByteBuffer buf) {
  int savedPos = buf.position();
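
The point of the javadoc fix above is that getBytes(ByteBuffer) copies from the buffer's current position to its limit and then restores the position. A short hedged sketch illustrating that behaviour:

    // Sketch only: shows that Bytes.getBytes(ByteBuffer) leaves the buffer's
    // position unchanged after copying position..limit.
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetBytesSketch {
      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap(Bytes.toBytes("hello world"));
        buf.position(6);                           // skip "hello "
        byte[] tail = Bytes.getBytes(buf);         // copies "world"
        System.out.println(Bytes.toString(tail));  // prints: world
        System.out.println(buf.position());        // still 6 -- position restored
      }
    }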

@@ -71,7 +71,7 @@ public class CompoundBloomFilterBase implements BloomFilterBase {

/**
* Prepare an ordered pair of row and qualifier to be compared using
* {@link KeyValue.KeyComparator}. This is only used for row-column Bloom
* KeyValue.KeyComparator. This is only used for row-column Bloom
* filters.
*/
@Override

@@ -19,6 +19,17 @@
*/
package org.apache.hadoop.hbase.util;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@@ -38,23 +49,11 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
* Utility methods for interacting with the underlying file system.
*/

@@ -523,7 +522,7 @@ public abstract class FSUtils {
/**
* Compute HDFS blocks distribution of a given file, or a portion of the file
* @param fs file system
* @param FileStatus file status of the file
* @param status file status of the file
* @param start start position of the portion
* @param length length of the portion
* @return The HDFS blocks distribution

@@ -1104,7 +1103,7 @@ public abstract class FSUtils {
/**
* Update table descriptor
* @param fs
* @param conf
* @param rootdir
* @param hTableDescriptor
* @throws IOException
*/

@@ -21,14 +21,10 @@ package org.apache.hadoop.hbase.util;

import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Action;

@@ -129,7 +125,7 @@ public class Objects {
* Attempts to construct a text description of the given object, by
* introspecting known classes and building a description of size.
* @param obj
* @return
* @return Description
*/
public static String describeQuantity(Object obj) {
  StringBuilder str = new StringBuilder();

@@ -59,7 +59,7 @@ import org.apache.zookeeper.data.Stat;
* the create it will do a getChildren("/") and see "x-222-1", "x-542-30",
* "x-352-109", x-333-110". The process will know that the original create
* succeeded an the znode it created is "x-352-109".
* @see http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling
* @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling"
*/
public class RecoverableZooKeeper {
private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class);

@@ -142,7 +142,7 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throw out exception
* @param path
* @param watcher
* @return
* @return A Stat instance
* @throws KeeperException
* @throws InterruptedException
*/

@@ -177,7 +177,7 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throw out exception
* @param path
* @param watch
* @return
* @return A Stat instance
* @throws KeeperException
* @throws InterruptedException
*/

@@ -212,7 +212,7 @@ public class RecoverableZooKeeper {
* getChildren is an idempotent operation. Retry before throw out exception
* @param path
* @param watcher
* @return
* @return List of children znodes
* @throws KeeperException
* @throws InterruptedException
*/

@@ -247,7 +247,7 @@ public class RecoverableZooKeeper {
* getChildren is an idempotent operation. Retry before throw out exception
* @param path
* @param watch
* @return
* @return List of children znodes
* @throws KeeperException
* @throws InterruptedException
*/

@@ -283,7 +283,7 @@ public class RecoverableZooKeeper {
* @param path
* @param watcher
* @param stat
* @return
* @return Data
* @throws KeeperException
* @throws InterruptedException
*/

@@ -320,7 +320,7 @@ public class RecoverableZooKeeper {
* @param path
* @param watch
* @param stat
* @return
* @return Data
* @throws KeeperException
* @throws InterruptedException
*/

@@ -359,7 +359,7 @@ public class RecoverableZooKeeper {
* @param path
* @param data
* @param version
* @return
* @return Stat instance
* @throws KeeperException
* @throws InterruptedException
*/

@@ -427,7 +427,7 @@ public class RecoverableZooKeeper {
* @param data
* @param acl
* @param createMode
* @return
* @return Path
* @throws KeeperException
* @throws InterruptedException
*/
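
The hunks above all document RecoverableZooKeeper's retrying wrappers around the plain ZooKeeper calls. A hedged usage sketch, assuming an already-constructed RecoverableZooKeeper instance and the parameter lists shown in these hunks; the znode path and payload are illustrative only.

    // Sketch only: exercises the idempotent, internally retried read/write
    // wrappers whose javadoc is fixed above.
    import java.util.List;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RecoverableZkSketch {
      public static void readAndUpdate(RecoverableZooKeeper rzk) throws Exception {
        String path = "/hbase/example";           // hypothetical znode

        // exists(): returns a Stat or null; retried before throwing.
        Stat stat = rzk.exists(path, false);
        if (stat == null) {
          return;
        }

        // getData()/getChildren(): also idempotent and retried.
        byte[] data = rzk.getData(path, false, stat);
        List<String> children = rzk.getChildren(path, false);

        // setData(): guarded by the version read above.
        rzk.setData(path, Bytes.toBytes("new-value"), stat.getVersion());
        System.out.println("children=" + children
            + ", oldData=" + Bytes.toStringBinary(data));
      }
    }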

@@ -30,7 +30,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData;
import org.apache.zookeeper.KeeperException;

/**

@@ -41,7 +40,7 @@ import org.apache.zookeeper.KeeperException;
* listening for changes in the RS node list and watching each node.
*
* <p>If an RS node gets deleted, this automatically handles calling of
* {@link ServerManager#expireServer(org.apache.hadoop.hbase.HServerInfo)}.
* {@link ServerManager#expireServer(ServerName)}
*/
public class RegionServerTracker extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(RegionServerTracker.class);

@@ -121,7 +120,6 @@ public class RegionServerTracker extends ZooKeeperListener {
/**
* Gets the online servers.
* @return list of online servers
* @throws KeeperException
*/
public List<ServerName> getOnlineServers() {
synchronized (this.regionServers) {
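
For reference, a minimal hedged sketch of reading the tracked server list via the accessor whose javadoc is trimmed above; how the tracker is created and started is assumed to happen elsewhere.

    // Sketch only: reads the cached list of live region servers from an
    // existing RegionServerTracker instance.
    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;

    public class OnlineServersSketch {
      public static void printOnlineServers(RegionServerTracker tracker) {
        List<ServerName> online = tracker.getOnlineServers();
        for (ServerName sn : online) {
          System.out.println("live region server: " + sn);
        }
      }
    }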

@@ -68,7 +68,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener {
* Starts the tracking of the node in ZooKeeper.
*
* <p>Use {@link #blockUntilAvailable()} to block until the node is available
* or {@link #getData()} to get the data of the node if it is available.
* or {@link #getData(boolean)} to get the data of the node if it is available.
*/
public synchronized void start() {
this.watcher.registerListener(this);

@@ -143,7 +143,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener {
* <p>If the node is currently available, the most up-to-date known version of
* the data is returned. If the node is not currently available, null is
* returned.
* @param whether to refresh the data by calling ZK directly.
* @param refresh whether to refresh the data by calling ZK directly.
* @return data of the node, null if unavailable
*/
public synchronized byte [] getData(boolean refresh) {
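
The two hunks above adjust the javadoc for the tracker's start/read cycle. A hedged sketch of how a concrete tracker subclass might be used, assuming the tracker instance has already been constructed (for example, a master address tracker):

    // Sketch only: typical start/read sequence for a ZooKeeperNodeTracker
    // subclass, per the javadoc fixed above.
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;

    public class NodeTrackerSketch {
      public static byte[] readTrackedNode(ZooKeeperNodeTracker tracker)
          throws InterruptedException {
        tracker.start();                // register the listener and prime the watch
        tracker.blockUntilAvailable();  // wait until the znode exists
        // refresh=false returns the cached copy; refresh=true re-reads from ZK.
        return tracker.getData(false);
      }
    }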