Revert "Correct Javadoc generation errors"
This reverts commit 0a227b79d6
.
This commit is contained in:
parent
4eb799331b
commit
5e6373e8ec
|
@@ -20,10 +20,12 @@
 /**
 Provides client classes for invoking Coprocessor RPC protocols
 
+<p>
 <ul>
 <li><a href="#overview">Overview</a></li>
 <li><a href="#usage">Example Usage</a></li>
 </ul>
+</p>
 
 <h2><a name="overview">Overview</a></h2>
 <p>
@@ -36,7 +38,6 @@ protocols.
 <p>
 In order to provide a custom RPC protocol to clients, a coprocessor implementation
 must:
-</p>
 <ul>
 <li>Define a protocol buffer Service and supporting Message types for the RPC methods.
 See the
@@ -48,7 +49,6 @@ must:
 {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()}
 method should return a reference to the Endpoint's protocol buffer Service instance.
 </ul>
-<p>
 Clients may then call the defined service methods on coprocessor instances via
 the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
 {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
@@ -63,7 +63,6 @@ method invocations. Since regions are seldom handled directly in client code
 and the region names may change over time, the coprocessor RPC calls use row keys
 to identify which regions should be used for the method invocations. Clients
 can call coprocessor Service methods against either:
-</p>
 <ul>
 <li><strong>a single region</strong> - calling
 {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}
@@ -78,6 +77,7 @@ can call coprocessor Service methods against either:
 from the region containing the start row key to the region containing the end
 row key (inclusive), will we used as the RPC endpoints.</li>
 </ul>
+</p>
 
 <p><em>Note that the row keys passed as parameters to the <code>Table</code>
 methods are not passed directly to the coprocessor Service implementations.
@@ -135,12 +135,12 @@ public static abstract class RowCountService
 public abstract void getRowCount(
 com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
 com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
 
 public abstract void getKeyValueCount(
 com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
 com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
 }
 }
 </pre></blockquote></div>
@@ -163,13 +163,13 @@ use:
 Connection connection = ConnectionFactory.createConnection(conf);
 Table table = connection.getTable(TableName.valueOf("mytable"));
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(
 ExampleProtos.RowCountService.class, // the protocol interface we're invoking
 null, null, // start and end row keys
 new Batch.Call<ExampleProtos.RowCountService,Long>() {
 public Long call(ExampleProtos.RowCountService counter) throws IOException {
 BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
 new BlockingRpcCallback<ExampleProtos.CountResponse>();
 counter.getRowCount(null, request, rpcCallback);
 ExampleProtos.CountResponse response = rpcCallback.get();
 return response.hasCount() ? response.getCount() : 0;
@@ -204,17 +204,17 @@ Connection connection = ConnectionFactory.createConnection(conf);
 Table table = connection.getTable(TableName.valueOf("mytable"));
 // combine row count and kv count for region
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(
 ExampleProtos.RowCountService.class, // the protocol interface we're invoking
 null, null, // start and end row keys
 new Batch.Call<ExampleProtos.RowCountService,Pair<Long,Long>>() {
 public Long call(ExampleProtos.RowCountService counter) throws IOException {
 BlockingRpcCallback<ExampleProtos.CountResponse> rowCallback =
 new BlockingRpcCallback<ExampleProtos.CountResponse>();
 counter.getRowCount(null, request, rowCallback);
 
 BlockingRpcCallback<ExampleProtos.CountResponse> kvCallback =
 new BlockingRpcCallback<ExampleProtos.CountResponse>();
 counter.getKeyValueCount(null, request, kvCallback);
 
 ExampleProtos.CountResponse rowResponse = rowCallback.get();
@@ -33,7 +33,7 @@ import com.google.common.collect.ImmutableMap;
 @InterfaceStability.Evolving
 public class ServerSideScanMetrics {
 /**
 * Hash to hold the String -> Atomic Long mappings for each metric
 */
 private final Map<String, AtomicLong> counters = new HashMap<String, AtomicLong>();
 
@@ -103,7 +103,7 @@ public class ServerSideScanMetrics {
 /**
 * Get all of the values since the last time this function was called. Calling this function will
 * reset all AtomicLongs in the instance back to 0.
 * @return A Map of String -> Long for metrics
 */
 public Map<String, Long> getMetricsMap() {
 // Create a builder
@@ -270,8 +270,6 @@ public final class ProtobufUtil {
 
 /**
 * @param bytes Bytes to check.
-* @param offset offset to start at
-* @param len length to use
 * @return True if passed <code>bytes</code> has {@link ProtobufMagic#PB_MAGIC} for a prefix.
 */
 public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {
@@ -281,7 +279,7 @@ public final class ProtobufUtil {
 }
 
 /**
-* @param bytes bytes to check
+* @param bytes
 * @throws DeserializationException if we are missing the pb magic prefix
 */
 public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException {
@@ -70,8 +70,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
 * cell
 * @param left
 * @param right
 * @return an int greater than 0 if left > than right
 * lesser than 0 if left < than right
 * equal to 0 if left is equal to right
 */
 public final int compareKeyIgnoresMvcc(Cell left, Cell right) {
@@ -512,8 +512,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
 * wrong but it is intentional. This way, newer timestamps are first
 * found when we iterate over a memstore and newer versions are the
 * first we trip over when reading from a store file.
 * @return 1 if left's timestamp < right's timestamp
 * -1 if left's timestamp > right's timestamp
 * 0 if both timestamps are equal
 */
 public static int compareTimestamps(final Cell left, final Cell right) {
@@ -601,8 +601,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
 * wrong but it is intentional. This way, newer timestamps are first
 * found when we iterate over a memstore and newer versions are the
 * first we trip over when reading from a store file.
 * @return 1 if left timestamp < right timestamp
 * -1 if left timestamp > right timestamp
 * 0 if both timestamps are equal
 */
 public static int compareTimestamps(final long ltimestamp, final long rtimestamp) {
@@ -66,7 +66,7 @@ import com.google.protobuf.Service;
 * deleted(even if Scan fetches many versions). When timestamp passed as null, all the versions
 * which the Scan selects will get deleted.
 *
-* <br> Example: <pre><code>
+* </br> Example: <code><pre>
 * Scan scan = new Scan();
 * // set scan properties(rowkey range, filters, timerange etc).
 * HTable ht = ...;
@@ -93,7 +93,7 @@ import com.google.protobuf.Service;
 * for (BulkDeleteResponse response : result.values()) {
 * noOfDeletedRows += response.getRowsDeleted();
 * }
-* </code></pre>
+* </pre></code>
 */
 public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
 Coprocessor {
@@ -310,7 +310,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
 /****************** complete seek when token mismatch ******************/
 
 /**
-* @param searcherIsAfterInputKey <0: input key is before the searcher's position<br>
+* @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
 * >0: input key is after the searcher's position
 */
 protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {
@@ -33,7 +33,6 @@ import com.google.common.collect.Lists;
 
 /**
 * Individual node in a Trie structure. Each node is one of 3 types:
-* <ul>
 * <li>Branch: an internal trie node that may have a token and must have multiple children, but does
 * not represent an actual input byte[], hence its numOccurrences is 0
 * <li>Leaf: a node with no children and where numOccurrences is >= 1. It's token represents the
@@ -41,7 +40,6 @@ import com.google.common.collect.Lists;
 * <li>Nub: a combination of a branch and leaf. Its token represents the last bytes of input
 * byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s
 * that add bytes to this nodes input byte[].
-* </ul>
 * <br><br>
 * Example inputs (numInputs=7):
 * 0: AAA
@@ -550,8 +548,7 @@ public class TokenizerNode{
 /********************** simple mutation methods *************************/
 
 /**
-* Each occurrence > 1 indicates a repeat of the previous entry.
-* This can be called directly by
+* Each occurrence > 1 indicates a repeat of the previous entry. This can be called directly by
 * an external class without going through the process of detecting a repeat if it is a known
 * repeat by some external mechanism. PtEncoder uses this when adding cells to a row if it knows
 * the new cells are part of the current row.
@@ -50,8 +50,8 @@ public interface CellSearcher extends ReversibleCellScanner {
 * exact match.
 * </p>
 * @param key position the CellScanner on this key or the closest cell before
-* @return AT if exact match<br>
-* BEFORE if on last cell before key<br>
+* @return AT if exact match<br/>
+* BEFORE if on last cell before key<br/>
 * BEFORE_FIRST if key was before the first cell in this scanner's scope
 */
 CellScannerPosition positionAtOrBefore(Cell key);
@@ -62,8 +62,8 @@ public interface CellSearcher extends ReversibleCellScanner {
 * match.
 * </p>
 * @param key position the CellScanner on this key or the closest cell after
-* @return AT if exact match<br>
-* AFTER if on first cell after key<br>
+* @return AT if exact match<br/>
+* AFTER if on first cell after key<br/>
 * AFTER_LAST if key was after the last cell in this scanner's scope
 */
 CellScannerPosition positionAtOrAfter(Cell key);
@@ -64,6 +64,7 @@ public interface ProcedureStore {
 /**
 * Returns the next procedure in the iteration.
 * @throws IOException if there was an error fetching/deserializing the procedure
+* @throws NoSuchElementException if the iteration has no more elements
 * @return the next procedure in the iteration.
 */
 Procedure next() throws IOException;
@@ -62,7 +62,6 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
 * Extracts the byte array from the given {@link ByteString} without copy.
 * @param buf A buffer from which to extract the array. This buffer must be
 * actually an instance of a {@code LiteralByteString}.
-* @return byte[] representation
 */
 public static byte[] zeroCopyGetBytes(final ByteString buf) {
 if (buf instanceof LiteralByteString) {
@@ -74,8 +74,6 @@ public class ProtobufMagic {
 
 /**
 * @param bytes Bytes to check.
-* @param offset offset to start at
-* @param len length to use
 * @return True if passed <code>bytes</code> has {@link #PB_MAGIC} for a prefix.
 */
 public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {
@@ -100,7 +100,7 @@ public class RemoteAdmin {
 
 /**
 * @return string representing the rest api's version
-* @throws IOException
+* @throws IOEXception
 * if the endpoint does not exist, there is a timeout, or some other
 * general failure mode
 */
@@ -144,7 +144,7 @@ public class RemoteAdmin {
 
 /**
 * @return string representing the cluster's version
-* @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
+* @throws IOEXception if the endpoint does not exist, there is a timeout, or some other general failure mode
 */
 public StorageClusterStatusModel getClusterStatus() throws IOException {
 
@@ -185,7 +185,7 @@ public class RemoteAdmin {
 
 /**
 * @return string representing the cluster's version
-* @throws IOException
+* @throws IOEXception
 * if the endpoint does not exist, there is a timeout, or some other
 * general failure mode
 */
@@ -357,7 +357,7 @@ public class RemoteAdmin {
 
 /**
 * @return string representing the cluster's version
-* @throws IOException
+* @throws IOEXception
 * if the endpoint does not exist, there is a timeout, or some other
 * general failure mode
 */
@@ -87,7 +87,7 @@ import com.sun.jersey.api.json.JSONUnmarshaller;
 *
 * <pre>
 * <complexType name="Scanner">
 * <sequence>
 * <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
 * <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
 * </sequence>
@@ -1096,7 +1096,7 @@ public interface RegionObserver extends Coprocessor {
 * <li>
 * <code>boolean filterRow()</code> returning true</li>
 * <li>
 * <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from
 * the passed List</li>
 * </ol>
 * @param c the environment provided by the region server
@@ -96,7 +96,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
 * There are three contexts:
 * "/logs/" -> points to the log directory
 * "/static/" -> points to common static files (src/webapps/static)
 * "/" -> the jsp server code from (src/webapps/<name>)
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -447,7 +447,7 @@ public class HttpServer implements FilterContainer {
 
 /**
 * Create a status server on the given port.
 * The jsp scripts are taken from src/webapps/<name>.
 * @param name The name of the server
 * @param bindAddress The address for this server
 * @param port The port to use on the server
@@ -466,7 +466,7 @@ public class HttpServer implements FilterContainer {
 
 /**
 * Create a status server on the given port.
 * The jsp scripts are taken from src/webapps/<name>.
 * @param name The name of the server
 * @param bindAddress The address for this server
 * @param port The port to use on the server
@@ -32,9 +32,9 @@ import org.apache.hadoop.conf.Configuration;
 * Create a Jetty embedded server to answer http requests. The primary goal
 * is to serve up status information for the server.
 * There are three contexts:
 * "/stacks/" -> points to stack trace
 * "/static/" -> points to common static files (src/hbase-webapps/static)
 * "/" -> the jsp server code from (src/hbase-webapps/<name>)
 */
 @InterfaceAudience.Private
 public class InfoServer {
@@ -44,7 +44,7 @@ public class InfoServer {
 
 /**
 * Create a status server on the given port.
-* The jsp scripts are taken from src/hbase-webapps/<code>name</code>.
+* The jsp scripts are taken from src/hbase-webapps/<code>name<code>.
 * @param name The name of the server
 * @param bindAddress address to bind to
 * @param port The port to use on the server
@@ -51,34 +51,28 @@ import org.apache.hadoop.hbase.util.JSONBean;
 * functionality is provided through the
 * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
 * method.
-* </p>
 * <p>
 * For example <code>http://.../jmx?qry=Hadoop:*</code> will return
 * all hadoop metrics exposed through JMX.
-* </p>
 * <p>
 * The optional <code>get</code> parameter is used to query an specific
 * attribute of a JMX bean. The format of the URL is
-* <code>http://.../jmx?get=MXBeanName::AttributeName</code>
-* </p>
+* <code>http://.../jmx?get=MXBeanName::AttributeName<code>
 * <p>
 * For example
 * <code>
 * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
 * </code> will return the cluster id of the namenode mxbean.
-* </p>
 * <p>
 * If the <code>qry</code> or the <code>get</code> parameter is not formatted
 * correctly then a 400 BAD REQUEST http response code will be returned.
-* </p>
 * <p>
 * If a resouce such as a mbean or attribute can not be found,
 * a 404 SC_NOT_FOUND http response code will be returned.
-* </p>
 * <p>
 * The return format is JSON and in the form
-* </p>
-* <pre><code>
+* <p>
+* <code><pre>
 * {
 * "beans" : [
 * {
@@ -87,7 +81,7 @@ import org.apache.hadoop.hbase.util.JSONBean;
 * }
 * ]
 * }
-* </code></pre>
+* </pre></code>
 * <p>
 * The servlet attempts to convert the the JMXBeans into JSON. Each
 * bean's attributes will be converted to a JSON object member.
@@ -107,7 +101,6 @@ import org.apache.hadoop.hbase.util.JSONBean;
 * The bean's name and modelerType will be returned for all beans.
 *
 * Optional paramater "callback" should be used to deliver JSONP response.
-* </p>
 *
 */
 public class JMXJsonServlet extends HttpServlet {
@@ -19,7 +19,7 @@
 * This package provides access to JMX primarily through the
 * {@link org.apache.hadoop.hbase.http.jmx.JMXJsonServlet} class.
 * <p>
-* Copied from hadoop source code.<br>
+* Copied from hadoop source code.<br/>
 * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
 * </p>
 */
@@ -26,7 +26,7 @@
 * users a static configured user.
 * </ul>
 * <p>
-* Copied from hadoop source code.<br>
+* Copied from hadoop source code.<br/>
 * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why
 * </p>
 */
@@ -52,14 +52,13 @@ import org.apache.hadoop.util.StringUtils;
 
 /**
 * Provides functionality to write ({@link BlockIndexWriter}) and read
-* ({@link BlockIndexReader})
+* ({@link org.apache.hadoop.hbase.io.hfile.BlockIndexReader})
 * single-level and multi-level block indexes.
 *
 * Examples of how to use the block index writer can be found in
 * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
 * {@link HFileWriterImpl}. Examples of how to use the reader can be
-* found in {@link HFileWriterImpl} and
-* {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}.
+* found in {@link HFileWriterImpl} and TestHFileBlockIndex.
 */
 @InterfaceAudience.Private
 public class HFileBlockIndex {
@@ -407,8 +407,7 @@ public final class BucketAllocator {
 /**
 * Allocate a block with specified size. Return the offset
 * @param blockSize size of block
-* @throws BucketAllocatorException
-* @throws CacheFullException
+* @throws BucketAllocatorException,CacheFullException
 * @return the offset in the IOEngine
 */
 public synchronized long allocateBlock(int blockSize) throws CacheFullException,
@@ -168,7 +168,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {
 
 /**
 * Calculates the splits that will serve as input for the map tasks.
-*
+* <ul>
 * Splits are created in number equal to the smallest between numSplits and
 * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
 * If the number of splits is smaller than the number of
@@ -118,11 +118,11 @@ public class MultiTableSnapshotInputFormatImpl {
 }
 
 /**
 * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
 * {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
 *
 * @param conf Configuration to extract name -> list<scan> mappings from.
 * @return the snapshot name -> list<scan> mapping pushed to configuration
 * @throws IOException
 */
 public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {
@@ -25,17 +25,14 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 * <p>
 * If other effects are needed, implement your own LogCleanerDelegate and add it to the
 * configuration "hbase.master.hfilecleaner.plugins", which is a comma-separated list of fully
-* qualified class names. The <code>HFileCleaner</code> will build the cleaner chain in
+* qualified class names. The <code>HFileCleaner<code> will build the cleaner chain in
 * order the order specified by the configuration.
-* </p>
 * <p>
 * For subclasses, setConf will be called exactly <i>once</i> before using the cleaner.
-* </p>
 * <p>
 * Since {@link BaseHFileCleanerDelegate HFileCleanerDelegates} are created in
 * HFileCleaner by reflection, classes that implements this interface <b>must</b>
 * provide a default constructor.
-* </p>
 */
 @InterfaceAudience.Private
 public abstract class BaseHFileCleanerDelegate extends BaseFileCleanerDelegate {
@@ -232,6 +232,7 @@ public abstract class TableEventHandler extends EventHandler {
 * Gets a TableDescriptor from the masterServices. Can Throw exceptions.
 *
 * @return Table descriptor for this table
+* @throws TableExistsException
 * @throws FileNotFoundException
 * @throws IOException
 */
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 * <li>SnapshotDescription is readable</li>
 * <li>Table info is readable</li>
 * <li>Regions</li>
-* </ol>
 * <ul>
 * <li>Matching regions in the snapshot as currently in the table</li>
 * <li>{@link HRegionInfo} matches the current and stored regions</li>
@@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 * <li>All the hfiles are present (either in .archive directory in the region)</li>
 * <li>All recovered.edits files are present (by name) and have the correct file size</li>
 * </ul>
+* </ol>
 */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -3776,6 +3776,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 * the maxSeqId for the store to be applied, else its skipped.
 * @return the sequence id of the last edit added to this region out of the
 * recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
+* @throws UnsupportedEncodingException
 * @throws IOException
 */
 protected long replayRecoveredEditsIfAny(final Path regiondir,
@@ -27,20 +27,18 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 
 /**
-* <p>
 * Compaction configuration for a particular instance of HStore.
 * Takes into account both global settings and ones set on the column family/store.
 * Control knobs for default compaction algorithm:
-* </p>
-* <p>
+* <p/>
 * maxCompactSize - upper bound on file size to be included in minor compactions
 * minCompactSize - lower bound below which compaction is selected without ratio test
 * minFilesToCompact - lower bound on number of files in any minor compaction
 * maxFilesToCompact - upper bound on number of files in any minor compaction
 * compactionRatio - Ratio used for compaction
 * minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195)
-* </p>
+* <p/>
 * Set parameter as "hbase.hstore.compaction.<attribute>"
 */
 
 @InterfaceAudience.Private
@@ -226,7 +226,7 @@ public abstract class Compactor {
 * @param scanner Where to read from.
 * @param writer Where to write to.
 * @param smallestReadPoint Smallest read point.
 * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
 * @return Whether compaction ended; false if it was interrupted for some reason.
 */
 protected boolean performCompaction(InternalScanner scanner, CellSink writer,
@@ -32,31 +32,26 @@ import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
-* <p>
 * This coprocessor 'shallows' all the writes. It allows to test a pure
 * write workload, going through all the communication layers.
 * The reads will work as well, but they as we never write, they will always always
 * return an empty structure. The WAL is also skipped.
 * Obviously, the region will never be split automatically. It's up to the user
 * to split and move it.
-* </p>
-* <p>
+* <p/>
 * For a table created like this:
 * create 'usertable', {NAME => 'f1', VERSIONS => 1}
-* </p>
-* <p>
+* <p/>
 * You can then add the coprocessor with this command:
 * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
-* </p>
-* <p>
+* <p/>
 * And then
 * put 'usertable', 'f1', 'f1', 'f1'
-* </p>
-* <p>
+* <p/>
 * scan 'usertable'
 * Will return:
 * 0 row(s) in 0.0050 seconds
-* </p>
+* <p/>
 */
 public class WriteSinkCoprocessor extends BaseRegionObserver {
 private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class);
@@ -88,7 +88,7 @@ public class BloomFilterChunk implements BloomFilterBase {
 }
 
 /**
 * Determines & initializes bloom filter meta data from user config. Call
 * {@link #allocBloom()} to allocate bloom filter data.
 *
 * @param maxKeys Maximum expected number of keys that will be stored in this
@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 /**
 * This class marches through all of the region's hfiles and verifies that
 * they are all valid files. One just needs to instantiate the class, use
 * checkTables(List<Path>) and then retrieve the corrupted hfiles (and
 * quarantined files if in quarantining mode)
 *
 * The implementation currently parallelizes at the regionDir level.
@@ -304,8 +304,8 @@ public class DefaultWALProvider implements WALProvider {
 * This function returns region server name from a log file name which is in one of the following
 * formats:
 * <ul>
-* <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...</li>
-* <li>hdfs://<name node>/hbase/.logs/<server name>/...</li>
+* <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...
+* <li>hdfs://<name node>/hbase/.logs/<server name>/...
 * </ul>
 * @param logFile
 * @return null if the passed in logFile isn't a valid WAL file path
@@ -2228,13 +2228,13 @@ public class WALSplitter {
 }
 
 /**
-* This function is used to construct mutations from a WALEntry. It also
-* reconstructs WALKey & WALEdit from the passed in WALEntry
+* This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
+* WALEdit from the passed in WALEntry
 * @param entry
 * @param cells
 * @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
 * extracted from the passed in WALEntry.
 * @return list of Pair<MutationType, Mutation> to be replayed
 * @throws IOException
 */
 public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
@@ -417,7 +417,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
 
 /**
 * Visits the locks (both held and attempted) with the given MetadataHandler.
-* @throws IOException If there is an unrecoverable error
+* @throws InterruptedException If there is an unrecoverable error
 */
 public void visitLocks(MetadataHandler handler) throws IOException {
 List<String> children;