Revert "Correct Javadoc generation errors"

This reverts commit 0a227b79d6.

parent 4eb799331b
commit 5e6373e8ec

@@ -20,10 +20,12 @@
/**
Provides client classes for invoking Coprocessor RPC protocols

<p>
<ul>
<li><a href="#overview">Overview</a></li>
<li><a href="#usage">Example Usage</a></li>
</ul>
</p>

<h2><a name="overview">Overview</a></h2>
<p>

@@ -36,7 +38,6 @@ protocols.
<p>
In order to provide a custom RPC protocol to clients, a coprocessor implementation
must:
</p>
<ul>
<li>Define a protocol buffer Service and supporting Message types for the RPC methods.
See the

@@ -48,7 +49,6 @@ must:
{@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()}
method should return a reference to the Endpoint's protocol buffer Service instance.
</ul>
<p>
Clients may then call the defined service methods on coprocessor instances via
the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and

@@ -63,7 +63,6 @@ method invocations. Since regions are seldom handled directly in client code
and the region names may change over time, the coprocessor RPC calls use row keys
to identify which regions should be used for the method invocations. Clients
can call coprocessor Service methods against either:
</p>
<ul>
<li><strong>a single region</strong> - calling
{@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}

@@ -78,6 +77,7 @@ can call coprocessor Service methods against either:
from the region containing the start row key to the region containing the end
row key (inclusive), will we used as the RPC endpoints.</li>
</ul>
</p>

<p><em>Note that the row keys passed as parameters to the <code>Table</code>
methods are not passed directly to the coprocessor Service implementations.

@@ -135,12 +135,12 @@ public static abstract class RowCountService
public abstract void getRowCount(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);

public abstract void getKeyValueCount(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
}
}
</pre></blockquote></div>

@@ -163,13 +163,13 @@ use:
Connection connection = ConnectionFactory.createConnection(conf);
Table table = connection.getTable(TableName.valueOf("mytable"));
final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
Map<byte[],Long> results = table.coprocessorService(
Map<byte[],Long> results = table.coprocessorService(
ExampleProtos.RowCountService.class, // the protocol interface we're invoking
null, null, // start and end row keys
new Batch.Call<ExampleProtos.RowCountService,Long>() {
new Batch.Call<ExampleProtos.RowCountService,Long>() {
public Long call(ExampleProtos.RowCountService counter) throws IOException {
BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
counter.getRowCount(null, request, rpcCallback);
ExampleProtos.CountResponse response = rpcCallback.get();
return response.hasCount() ? response.getCount() : 0;

@@ -204,17 +204,17 @@ Connection connection = ConnectionFactory.createConnection(conf);
Table table = connection.getTable(TableName.valueOf("mytable"));
// combine row count and kv count for region
final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
Map<byte[],Long> results = table.coprocessorService(
Map<byte[],Long> results = table.coprocessorService(
ExampleProtos.RowCountService.class, // the protocol interface we're invoking
null, null, // start and end row keys
new Batch.Call<ExampleProtos.RowCountService,Pair<Long,Long>>() {
new Batch.Call<ExampleProtos.RowCountService,Pair<Long,Long>>() {
public Long call(ExampleProtos.RowCountService counter) throws IOException {
BlockingRpcCallback<ExampleProtos.CountResponse> rowCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
BlockingRpcCallback<ExampleProtos.CountResponse> rowCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
counter.getRowCount(null, request, rowCallback);

BlockingRpcCallback<ExampleProtos.CountResponse> kvCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
BlockingRpcCallback<ExampleProtos.CountResponse> kvCallback =
new BlockingRpcCallback<ExampleProtos.CountResponse>();
counter.getKeyValueCount(null, request, kvCallback);

ExampleProtos.CountResponse rowResponse = rowCallback.get();

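For reference (not part of this commit's diff), the package documentation above also mentions the single-region variant, Table#coprocessorService(byte[]). A minimal sketch of that call path, assuming the same ExampleProtos classes and the HBase 1.x client API shown in the example above; handling of com.google.protobuf.ServiceException is omitted:

// Illustrative sketch only: call RowCountService on the single region containing "somerow".
Table table = connection.getTable(TableName.valueOf("mytable"));
CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("somerow"));
ExampleProtos.RowCountService.BlockingInterface service =
    ExampleProtos.RowCountService.newBlockingStub(channel);
ExampleProtos.CountResponse response =
    service.getRowCount(null, ExampleProtos.CountRequest.getDefaultInstance());
long rowsInRegion = response.hasCount() ? response.getCount() : 0;
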
@@ -33,7 +33,7 @@ import com.google.common.collect.ImmutableMap;
@InterfaceStability.Evolving
public class ServerSideScanMetrics {
/**
* Hash to hold the String -> Atomic Long mappings for each metric
* Hash to hold the String -> Atomic Long mappings for each metric
*/
private final Map<String, AtomicLong> counters = new HashMap<String, AtomicLong>();

@@ -103,7 +103,7 @@ public class ServerSideScanMetrics {
/**
* Get all of the values since the last time this function was called. Calling this function will
* reset all AtomicLongs in the instance back to 0.
* @return A Map of String -> Long for metrics
* @return A Map of String -> Long for metrics
*/
public Map<String, Long> getMetricsMap() {
// Create a builder

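The Javadoc above specifies a read-and-reset contract for getMetricsMap(). A minimal sketch of that behavior, assuming only the counters map and the ImmutableMap import visible in the hunks above; not necessarily the committed implementation:

public Map<String, Long> getMetricsMap() {
  // Snapshot each counter and atomically reset it to 0, then return an immutable copy.
  ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
  for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
    builder.put(entry.getKey(), entry.getValue().getAndSet(0));
  }
  return builder.build();
}
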
@@ -270,8 +270,6 @@ public final class ProtobufUtil {

/**
* @param bytes Bytes to check.
* @param offset offset to start at
* @param len length to use
* @return True if passed <code>bytes</code> has {@link ProtobufMagic#PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {

@@ -281,7 +279,7 @@ public final class ProtobufUtil {
}

/**
* @param bytes bytes to check
* @param bytes
* @throws DeserializationException if we are missing the pb magic prefix
*/
public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException {

@@ -70,8 +70,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* cell
* @param left
* @param right
* @return an int greater than 0 if left > than right
* lesser than 0 if left < than right
* @return an int greater than 0 if left > than right
* lesser than 0 if left < than right
* equal to 0 if left is equal to right
*/
public final int compareKeyIgnoresMvcc(Cell left, Cell right) {

@@ -512,8 +512,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* wrong but it is intentional. This way, newer timestamps are first
* found when we iterate over a memstore and newer versions are the
* first we trip over when reading from a store file.
* @return 1 if left's timestamp < right's timestamp
* -1 if left's timestamp > right's timestamp
* @return 1 if left's timestamp < right's timestamp
* -1 if left's timestamp > right's timestamp
* 0 if both timestamps are equal
*/
public static int compareTimestamps(final Cell left, final Cell right) {

@@ -601,8 +601,8 @@ public class CellComparator implements Comparator<Cell>, Serializable {
* wrong but it is intentional. This way, newer timestamps are first
* found when we iterate over a memstore and newer versions are the
* first we trip over when reading from a store file.
* @return 1 if left timestamp < right timestamp
* -1 if left timestamp > right timestamp
* @return 1 if left timestamp < right timestamp
* -1 if left timestamp > right timestamp
* 0 if both timestamps are equal
*/
public static int compareTimestamps(final long ltimestamp, final long rtimestamp) {

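The Javadoc above describes a deliberately inverted ordering so that newer timestamps sort first. A minimal sketch of a comparator satisfying that contract (illustrative only, not the committed implementation):

public static int compareTimestamps(final long ltimestamp, final long rtimestamp) {
  if (ltimestamp < rtimestamp) {
    return 1;   // left is older, so it sorts after right
  }
  if (ltimestamp > rtimestamp) {
    return -1;  // left is newer, so it sorts before right
  }
  return 0;     // equal timestamps
}
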
@@ -66,7 +66,7 @@ import com.google.protobuf.Service;
* deleted(even if Scan fetches many versions). When timestamp passed as null, all the versions
* which the Scan selects will get deleted.
*
* <br> Example: <pre><code>
* </br> Example: <code><pre>
* Scan scan = new Scan();
* // set scan properties(rowkey range, filters, timerange etc).
* HTable ht = ...;

@@ -93,7 +93,7 @@ import com.google.protobuf.Service;
* for (BulkDeleteResponse response : result.values()) {
* noOfDeletedRows += response.getRowsDeleted();
* }
* </code></pre>
* </pre></code>
*/
public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
Coprocessor {

@@ -310,7 +310,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
/****************** complete seek when token mismatch ******************/

/**
* @param searcherIsAfterInputKey <0: input key is before the searcher's position<br>
* @param searcherIsAfterInputKey <0: input key is before the searcher's position<br/>
* >0: input key is after the searcher's position
*/
protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {

@@ -33,7 +33,6 @@ import com.google.common.collect.Lists;

/**
* Individual node in a Trie structure. Each node is one of 3 types:
* <ul>
* <li>Branch: an internal trie node that may have a token and must have multiple children, but does
* not represent an actual input byte[], hence its numOccurrences is 0
* <li>Leaf: a node with no children and where numOccurrences is >= 1. It's token represents the

@@ -41,7 +40,6 @@ import com.google.common.collect.Lists;
* <li>Nub: a combination of a branch and leaf. Its token represents the last bytes of input
* byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s
* that add bytes to this nodes input byte[].
* </ul>
* <br><br>
* Example inputs (numInputs=7):
* 0: AAA

@@ -550,8 +548,7 @@ public class TokenizerNode{
/********************** simple mutation methods *************************/

/**
* Each occurrence > 1 indicates a repeat of the previous entry.
* This can be called directly by
* Each occurrence > 1 indicates a repeat of the previous entry. This can be called directly by
* an external class without going through the process of detecting a repeat if it is a known
* repeat by some external mechanism. PtEncoder uses this when adding cells to a row if it knows
* the new cells are part of the current row.

@@ -50,8 +50,8 @@ public interface CellSearcher extends ReversibleCellScanner {
* exact match.
* </p>
* @param key position the CellScanner on this key or the closest cell before
* @return AT if exact match<br>
* BEFORE if on last cell before key<br>
* @return AT if exact match<br/>
* BEFORE if on last cell before key<br/>
* BEFORE_FIRST if key was before the first cell in this scanner's scope
*/
CellScannerPosition positionAtOrBefore(Cell key);

@@ -62,8 +62,8 @@ public interface CellSearcher extends ReversibleCellScanner {
* match.
* </p>
* @param key position the CellScanner on this key or the closest cell after
* @return AT if exact match<br>
* AFTER if on first cell after key<br>
* @return AT if exact match<br/>
* AFTER if on first cell after key<br/>
* AFTER_LAST if key was after the last cell in this scanner's scope
*/
CellScannerPosition positionAtOrAfter(Cell key);

@@ -64,6 +64,7 @@ public interface ProcedureStore {
/**
* Returns the next procedure in the iteration.
* @throws IOException if there was an error fetching/deserializing the procedure
* @throws NoSuchElementException if the iteration has no more elements
* @return the next procedure in the iteration.
*/
Procedure next() throws IOException;

@@ -62,7 +62,6 @@ public final class HBaseZeroCopyByteString extends LiteralByteString {
* Extracts the byte array from the given {@link ByteString} without copy.
* @param buf A buffer from which to extract the array. This buffer must be
* actually an instance of a {@code LiteralByteString}.
* @return byte[] representation
*/
public static byte[] zeroCopyGetBytes(final ByteString buf) {
if (buf instanceof LiteralByteString) {

@@ -74,8 +74,6 @@ public class ProtobufMagic {

/**
* @param bytes Bytes to check.
* @param offset offset to start at
* @param len length to use
* @return True if passed <code>bytes</code> has {@link #PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {

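As a reference for the @return contract above, a minimal sketch of a prefix check. It assumes PB_MAGIC is the short magic marker HBase writes ahead of protobuf-serialized content; the byte value shown here is an assumption for illustration, the real constant lives in ProtobufMagic:

// Hypothetical constant for illustration only.
private static final byte[] PB_MAGIC = new byte[] { 'P', 'B', 'U', 'F' };

public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) {
  if (bytes == null || len < PB_MAGIC.length) {
    return false;
  }
  for (int i = 0; i < PB_MAGIC.length; i++) {
    if (bytes[offset + i] != PB_MAGIC[i]) {
      return false;
    }
  }
  return true;
}
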
@@ -100,7 +100,7 @@ public class RemoteAdmin {

/**
* @return string representing the rest api's version
* @throws IOException
* @throws IOEXception
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/

@@ -144,7 +144,7 @@ public class RemoteAdmin {

/**
* @return string representing the cluster's version
* @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
* @throws IOEXception if the endpoint does not exist, there is a timeout, or some other general failure mode
*/
public StorageClusterStatusModel getClusterStatus() throws IOException {

@@ -185,7 +185,7 @@ public class RemoteAdmin {

/**
* @return string representing the cluster's version
* @throws IOException
* @throws IOEXception
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/

@@ -357,7 +357,7 @@ public class RemoteAdmin {

/**
* @return string representing the cluster's version
* @throws IOException
* @throws IOEXception
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/

@@ -87,7 +87,7 @@ import com.sun.jersey.api.json.JSONUnmarshaller;
*
* <pre>
* <complexType name="Scanner">
* <sequence>
* <sequence>
* <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
* <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
* </sequence>

@@ -1096,7 +1096,7 @@ public interface RegionObserver extends Coprocessor {
* <li>
* <code>boolean filterRow()</code> returning true</li>
* <li>
* <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from
* <code>void filterRow(List<KeyValue> kvs)</code> removing all the kvs from
* the passed List</li>
* </ol>
* @param c the environment provided by the region server

@@ -96,7 +96,7 @@ import com.sun.jersey.spi.container.servlet.ServletContainer;
* There are three contexts:
* "/logs/" -> points to the log directory
* "/static/" -> points to common static files (src/webapps/static)
* "/" -> the jsp server code from (src/webapps/<name>)
* "/" -> the jsp server code from (src/webapps/<name>)
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving

@@ -447,7 +447,7 @@ public class HttpServer implements FilterContainer {

/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server

@@ -466,7 +466,7 @@ public class HttpServer implements FilterContainer {

/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server

@@ -32,9 +32,9 @@ import org.apache.hadoop.conf.Configuration;
* Create a Jetty embedded server to answer http requests. The primary goal
* is to serve up status information for the server.
* There are three contexts:
* "/stacks/" -> points to stack trace
* "/static/" -> points to common static files (src/hbase-webapps/static)
* "/" -> the jsp server code from (src/hbase-webapps/<name>)
* "/stacks/" -> points to stack trace
* "/static/" -> points to common static files (src/hbase-webapps/static)
* "/" -> the jsp server code from (src/hbase-webapps/<name>)
*/
@InterfaceAudience.Private
public class InfoServer {

@@ -44,7 +44,7 @@ public class InfoServer {

/**
* Create a status server on the given port.
* The jsp scripts are taken from src/hbase-webapps/<code>name</code>.
* The jsp scripts are taken from src/hbase-webapps/<code>name<code>.
* @param name The name of the server
* @param bindAddress address to bind to
* @param port The port to use on the server

@@ -51,34 +51,28 @@ import org.apache.hadoop.hbase.util.JSONBean;
* functionality is provided through the
* {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
* method.
* </p>
* <p>
* For example <code>http://.../jmx?qry=Hadoop:*</code> will return
* all hadoop metrics exposed through JMX.
* </p>
* <p>
* The optional <code>get</code> parameter is used to query an specific
* attribute of a JMX bean. The format of the URL is
* <code>http://.../jmx?get=MXBeanName::AttributeName</code>
* </p>
* <code>http://.../jmx?get=MXBeanName::AttributeName<code>
* <p>
* For example
* <code>
* http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
* </code> will return the cluster id of the namenode mxbean.
* </p>
* <p>
* If the <code>qry</code> or the <code>get</code> parameter is not formatted
* correctly then a 400 BAD REQUEST http response code will be returned.
* </p>
* <p>
* If a resouce such as a mbean or attribute can not be found,
* a 404 SC_NOT_FOUND http response code will be returned.
* </p>
* <p>
* The return format is JSON and in the form
* </p>
* <pre><code>
* <p>
* <code><pre>
* {
* "beans" : [
* {

@@ -87,7 +81,7 @@ import org.apache.hadoop.hbase.util.JSONBean;
* }
* ]
* }
* </code></pre>
* </pre></code>
* <p>
* The servlet attempts to convert the the JMXBeans into JSON. Each
* bean's attributes will be converted to a JSON object member.

@@ -107,7 +101,6 @@ import org.apache.hadoop.hbase.util.JSONBean;
* The bean's name and modelerType will be returned for all beans.
*
* Optional paramater "callback" should be used to deliver JSONP response.
* </p>
*
*/
public class JMXJsonServlet extends HttpServlet {

@@ -19,7 +19,7 @@
* This package provides access to JMX primarily through the
* {@link org.apache.hadoop.hbase.http.jmx.JMXJsonServlet} class.
* <p>
* Copied from hadoop source code.<br>
* Copied from hadoop source code.<br/>
* See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
* </p>
*/

@@ -26,7 +26,7 @@
* users a static configured user.
* </ul>
* <p>
* Copied from hadoop source code.<br>
* Copied from hadoop source code.<br/>
* See https://issues.apache.org/jira/browse/HADOOP-10232 to know why
* </p>
*/

@@ -52,14 +52,13 @@ import org.apache.hadoop.util.StringUtils;

/**
* Provides functionality to write ({@link BlockIndexWriter}) and read
* ({@link BlockIndexReader})
* ({@link org.apache.hadoop.hbase.io.hfile.BlockIndexReader})
* single-level and multi-level block indexes.
*
* Examples of how to use the block index writer can be found in
* {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
* {@link HFileWriterImpl}. Examples of how to use the reader can be
* found in {@link HFileWriterImpl} and
* {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}.
* found in {@link HFileWriterImpl} and TestHFileBlockIndex.
*/
@InterfaceAudience.Private
public class HFileBlockIndex {

@@ -407,8 +407,7 @@ public final class BucketAllocator {
/**
* Allocate a block with specified size. Return the offset
* @param blockSize size of block
* @throws BucketAllocatorException
* @throws CacheFullException
* @throws BucketAllocatorException,CacheFullException
* @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize) throws CacheFullException,

@@ -168,7 +168,7 @@ implements InputFormat<ImmutableBytesWritable, Result> {

/**
* Calculates the splits that will serve as input for the map tasks.
*
* <ul>
* Splits are created in number equal to the smallest between numSplits and
* the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
* If the number of splits is smaller than the number of

@@ -118,11 +118,11 @@ public class MultiTableSnapshotInputFormatImpl {
}

/**
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
* {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
*
* @param conf Configuration to extract name -> list<scan> mappings from.
* @return the snapshot name -> list<scan> mapping pushed to configuration
* @param conf Configuration to extract name -> list<scan> mappings from.
* @return the snapshot name -> list<scan> mapping pushed to configuration
* @throws IOException
*/
public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException {

@@ -25,17 +25,14 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
* <p>
* If other effects are needed, implement your own LogCleanerDelegate and add it to the
* configuration "hbase.master.hfilecleaner.plugins", which is a comma-separated list of fully
* qualified class names. The <code>HFileCleaner</code> will build the cleaner chain in
* qualified class names. The <code>HFileCleaner<code> will build the cleaner chain in
* order the order specified by the configuration.
* </p>
* <p>
* For subclasses, setConf will be called exactly <i>once</i> before using the cleaner.
* </p>
* <p>
* Since {@link BaseHFileCleanerDelegate HFileCleanerDelegates} are created in
* HFileCleaner by reflection, classes that implements this interface <b>must</b>
* provide a default constructor.
* </p>
*/
@InterfaceAudience.Private
public abstract class BaseHFileCleanerDelegate extends BaseFileCleanerDelegate {

@@ -232,6 +232,7 @@ public abstract class TableEventHandler extends EventHandler {
* Gets a TableDescriptor from the masterServices. Can Throw exceptions.
*
* @return Table descriptor for this table
* @throws TableExistsException
* @throws FileNotFoundException
* @throws IOException
*/

@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
* <li>SnapshotDescription is readable</li>
* <li>Table info is readable</li>
* <li>Regions</li>
* </ol>
* <ul>
* <li>Matching regions in the snapshot as currently in the table</li>
* <li>{@link HRegionInfo} matches the current and stored regions</li>

@@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
* <li>All the hfiles are present (either in .archive directory in the region)</li>
* <li>All recovered.edits files are present (by name) and have the correct file size</li>
* </ul>
* </ol>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable

@@ -3776,6 +3776,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* the maxSeqId for the store to be applied, else its skipped.
* @return the sequence id of the last edit added to this region out of the
* recovered edits log or <code>minSeqId</code> if nothing added from editlogs.
* @throws UnsupportedEncodingException
* @throws IOException
*/
protected long replayRecoveredEditsIfAny(final Path regiondir,

@@ -27,20 +27,18 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;

/**
* <p>
* Compaction configuration for a particular instance of HStore.
* Takes into account both global settings and ones set on the column family/store.
* Control knobs for default compaction algorithm:
* </p>
* <p>
* <p/>
* maxCompactSize - upper bound on file size to be included in minor compactions
* minCompactSize - lower bound below which compaction is selected without ratio test
* minFilesToCompact - lower bound on number of files in any minor compaction
* maxFilesToCompact - upper bound on number of files in any minor compaction
* compactionRatio - Ratio used for compaction
* minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195)
* </p>
* Set parameter as "hbase.hstore.compaction.<attribute>"
* <p/>
* Set parameter as "hbase.hstore.compaction.<attribute>"
*/

@InterfaceAudience.Private

@@ -226,7 +226,7 @@ public abstract class Compactor {
* @param scanner Where to read from.
* @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
* @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
* @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
* @return Whether compaction ended; false if it was interrupted for some reason.
*/
protected boolean performCompaction(InternalScanner scanner, CellSink writer,

@@ -32,31 +32,26 @@ import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/**
* <p>
* This coprocessor 'shallows' all the writes. It allows to test a pure
* write workload, going through all the communication layers.
* The reads will work as well, but they as we never write, they will always always
* return an empty structure. The WAL is also skipped.
* Obviously, the region will never be split automatically. It's up to the user
* to split and move it.
* </p>
* <p>
* <p/>
* For a table created like this:
* create 'usertable', {NAME => 'f1', VERSIONS => 1}
* </p>
* <p>
* create 'usertable', {NAME => 'f1', VERSIONS => 1}
* <p/>
* You can then add the coprocessor with this command:
* alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
* </p>
* <p>
* alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
* <p/>
* And then
* put 'usertable', 'f1', 'f1', 'f1'
* </p>
* <p>
* <p/>
* scan 'usertable'
* Will return:
* 0 row(s) in 0.0050 seconds
* </p>
* <p/>
*/
public class WriteSinkCoprocessor extends BaseRegionObserver {
private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class);

@@ -88,7 +88,7 @@ public class BloomFilterChunk implements BloomFilterBase {
}

/**
* Determines & initializes bloom filter meta data from user config. Call
* Determines & initializes bloom filter meta data from user config. Call
* {@link #allocBloom()} to allocate bloom filter data.
*
* @param maxKeys Maximum expected number of keys that will be stored in this

@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
/**
* This class marches through all of the region's hfiles and verifies that
* they are all valid files. One just needs to instantiate the class, use
* checkTables(List<Path>) and then retrieve the corrupted hfiles (and
* checkTables(List<Path>) and then retrieve the corrupted hfiles (and
* quarantined files if in quarantining mode)
*
* The implementation currently parallelizes at the regionDir level.

@@ -304,8 +304,8 @@ public class DefaultWALProvider implements WALProvider {
* This function returns region server name from a log file name which is in one of the following
* formats:
* <ul>
* <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...</li>
* <li>hdfs://<name node>/hbase/.logs/<server name>/...</li>
* <li>hdfs://<name node>/hbase/.logs/<server name>-splitting/...
* <li>hdfs://<name node>/hbase/.logs/<server name>/...
* </ul>
* @param logFile
* @return null if the passed in logFile isn't a valid WAL file path

@@ -2228,13 +2228,13 @@ public class WALSplitter {
}

/**
* This function is used to construct mutations from a WALEntry. It also
* reconstructs WALKey & WALEdit from the passed in WALEntry
* This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
* WALEdit from the passed in WALEntry
* @param entry
* @param cells
* @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
* extracted from the passed in WALEntry.
* @return list of Pair<MutationType, Mutation> to be replayed
* @return list of Pair<MutationType, Mutation> to be replayed
* @throws IOException
*/
public static List<MutationReplay> getMutationsFromWALEntry(WALEntry entry, CellScanner cells,

@@ -417,7 +417,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {

/**
* Visits the locks (both held and attempted) with the given MetadataHandler.
* @throws IOException If there is an unrecoverable error
* @throws InterruptedException If there is an unrecoverable error
*/
public void visitLocks(MetadataHandler handler) throws IOException {
List<String> children;