HBASE-22572 Javadoc Warnings: @link reference not found (#306)
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
Signed-off-by: stack <stack@apache.org>
commit 8fa7800389 (parent 7cd1caa1ec)
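Most hunks below apply one of two fixes: point `{@link}` at a fully qualified, resolvable class, or downgrade the reference to `{@code}` when the target is a package or a type that is not on the javadoc classpath. A hedged before/after sketch of that pattern (the class below is illustrative, not part of the patch):

package example; // illustrative class, not from the patch

/**
 * Before: a package target such as {@link org.apache.hadoop.hbase.filter} makes
 * javadoc emit "reference not found", because {@link} must resolve to a class or
 * member. After: {@code org.apache.hadoop.hbase.filter} renders the same
 * monospace text with no resolution attempted; when a real class is meant, it
 * can instead be fully qualified, e.g.
 * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
 */
public class JavadocLinkFixSketch {
}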
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.testclassification;
 
 /**
- * Tag a test as related to the {@link org.apache.hadoop.hbase.filter} package.
+ * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
  *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hbase.testclassification;
 
 /**
- * Tag a test as related to the {@link org.apache.hadoop.hbase.io} package. Things like HFile and
+ * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
  * the like.
  *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
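These two testclassification hunks touch pure marker interfaces, which carry no code and exist only to be listed in JUnit's @Category. A hedged sketch of how they tag a test (the test class itself is hypothetical):

import org.apache.hadoop.hbase.testclassification.FilterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

// Category membership is declared entirely by the annotation below.
@Category({ FilterTests.class, SmallTests.class })
public class TestExampleFilterTagging {
  @Test
  public void testTaggedTestRuns() {
    // runs under the FilterTests / SmallTests categories
  }
}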
@@ -45,4 +45,5 @@
   <suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>
   <suppress checks="ImportOrder" message="Extra separation in import group before"/>
   <suppress checks="MethodLength" files="org.apache.hadoop.hbase.thrift.DemoClient.java"/>
+  <suppress checks="LineLength" files="org.apache.hadoop.hbase.util.ZKDataMigrator.java"/>
 </suppressions>
@@ -246,8 +246,7 @@ public class MetricsConnection implements StatisticTrackable {
   /** Default load factor from {@link java.util.HashMap#DEFAULT_LOAD_FACTOR} */
   private static final float LOAD_FACTOR = 0.75f;
   /**
-   * Anticipated number of concurrent accessor threads, from
-   * {@link ConnectionImplementation#getBatchPool()}
+   * Anticipated number of concurrent accessor threads
    */
   private static final int CONCURRENCY_LEVEL = 256;
 
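For context, a load factor and a concurrency level like these are exactly what the three-argument ConcurrentHashMap constructor consumes. A hedged sketch (class, field names, and capacity are hypothetical, not MetricsConnection's actual fields):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Constants like LOAD_FACTOR and CONCURRENCY_LEVEL pre-size the map's internal
// segmentation for the expected number of concurrent writer threads.
public class MetricsMapSketch {
  private static final int CAPACITY = 50; // hypothetical initial capacity
  private static final float LOAD_FACTOR = 0.75f;
  private static final int CONCURRENCY_LEVEL = 256;

  private final ConcurrentMap<String, Long> metrics =
      new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL);
}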
@@ -26,9 +26,9 @@ import org.apache.yetus.audience.InterfaceAudience;
  * A {@code WeakReference} based shared object pool.
  * The objects are kept in weak references and
  * associated with keys which are identified by the {@code equals} method.
- * The objects are created by {@link ObjectFactory} on demand.
- * The object creation is expected to be lightweight,
- * and the objects may be excessively created and discarded.
+ * The objects are created by {@link org.apache.hadoop.hbase.util.ObjectPool.ObjectFactory} on
+ * demand. The object creation is expected to be lightweight, and the objects may be excessively
+ * created and discarded.
  * Thread safe.
  */
 @InterfaceAudience.Private
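The ObjectPool javadoc above is a behavioral contract: weakly referenced values, keys matched by equals, creation on demand via an ObjectFactory. A self-contained sketch of that contract (all names hypothetical; this is not ObjectPool's actual code):

import java.lang.ref.WeakReference;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Hedged sketch of the documented behavior, not the HBase implementation.
public final class WeakSharedPoolSketch<K, V> {
  private final ConcurrentMap<K, WeakReference<V>> entries = new ConcurrentHashMap<>();
  private final Function<K, V> factory; // stands in for ObjectFactory#createObject

  public WeakSharedPoolSketch(Function<K, V> factory) {
    this.factory = factory;
  }

  /** Returns the shared object for the key, creating it on demand. */
  public V get(K key) {
    while (true) {
      WeakReference<V> ref = entries.get(key);
      V value = (ref == null) ? null : ref.get();
      if (value != null) {
        return value; // keys are matched by equals(); live objects are shared
      }
      // Creation is expected to be lightweight; a losing racer simply
      // discards its freshly created object and retries.
      V created = factory.apply(key);
      WeakReference<V> newRef = new WeakReference<>(created);
      if (ref == null
          ? entries.putIfAbsent(key, newRef) == null
          : entries.replace(key, ref, newRef)) {
        return created;
      }
    }
  }
}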
@@ -32,7 +32,7 @@ import org.junit.runner.notification.RunListener;
 import org.junit.runner.notification.RunListener.ThreadSafe;
 
 /**
- * A RunListener to confirm that we have a {@link CategoryBasedTimeout} class rule for every test.
+ * A RunListener to confirm that we have a {@link HBaseClassTestRule} class rule for every test.
  */
 @InterfaceAudience.Private
 @ThreadSafe
@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.util.Threads;
 
 /**
  * Test implementation of a coprocessor endpoint exposing the
- * {@link TestRpcServiceProtos.TestProtobufRpcProto} service methods. For internal use by unit tests
- * only.
+ * {@link org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto}
+ * service methods. For internal use by unit tests only.
  */
 public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto
     implements MasterCoprocessor, RegionCoprocessor {
@@ -66,12 +66,11 @@ import java.util.Map;
  * </pre>
  * Internally, this input format restores each snapshot into a subdirectory of the given tmp
  * directory. Input splits and
- * record readers are created as described in {@link org.apache.hadoop.hbase.mapreduce
- * .TableSnapshotInputFormat}
+ * record readers are created as described in
+ * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}
  * (one per region).
  * See {@link TableSnapshotInputFormat} for more notes on
- * permissioning; the
- * same caveats apply here.
+ * permissioning; the same caveats apply here.
  *
  * @see TableSnapshotInputFormat
 * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
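The surrounding javadoc explains the mechanics: each snapshot is restored under a tmp directory, then one split and record reader is created per region. A hedged driver sketch (the setInput signature is assumed; the snapshot name and restore path are illustrative):

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat;

// Hedged setup sketch for the input format described above.
public class SnapshotInputSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Map<String, Collection<Scan>> snapshotScans = new HashMap<>();
    snapshotScans.put("example_snapshot", Collections.singletonList(new Scan()));
    // Each snapshot is restored into a subdirectory of this tmp directory,
    // from which splits and record readers are created per region.
    MultiTableSnapshotInputFormat.setInput(conf, snapshotScans,
        new Path("/tmp/snapshot-restore"));
  }
}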
@@ -44,9 +44,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 
 /**
  * Shared implementation of mapreduce code over multiple table snapshots.
- * Utilized by both mapreduce ({@link org.apache.hadoop.hbase.mapreduce
- * .MultiTableSnapshotInputFormat} and mapred
- * ({@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations.
+ * Utilized by both mapreduce
+ * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred
+ * {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations.
  */
 @InterfaceAudience.LimitedPrivate({ "HBase" })
 @InterfaceStability.Evolving
@@ -186,7 +186,7 @@ To implement an Endpoint, you need to:
 </ul>
 <p>
 For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample
-code, see the {@link org.apache.hadoop.hbase.client.coprocessor} package documentation.
+code, see the {@code org.apache.hadoop.hbase.client.coprocessor} package documentation.
 </p>
 
 <h2><a name="load">Coprocessor loading</a></h2>
@@ -28,6 +28,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -1026,8 +1027,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
 
     abstract double cost();
 
+    @SuppressWarnings("checkstyle:linelength")
     /**
-     * Function to compute a scaled cost using {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics}.
+     * Function to compute a scaled cost using
+     * {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics#DescriptiveStatistics()}.
      * It assumes that this is a zero sum set of costs. It assumes that the worst case
      * possible is all of the elements in one region server and the rest having 0.
      *
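The cost-function javadoc compresses a fair amount of reasoning: raw costs are scaled against the worst case, in which one region server holds everything and the rest hold nothing. A hedged worked example that reproduces that scaling (not the balancer's implementation):

import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

// Hedged sketch: scale a per-server cost into [0, 1], where 0 is perfectly
// balanced and 1 is the worst case of all load on one server.
public class ScaledCostSketch {
  static double scaledCost(double[] perServerLoad) {
    DescriptiveStatistics stats = new DescriptiveStatistics();
    for (double v : perServerLoad) {
      stats.addValue(v);
    }
    double total = stats.getSum();
    double mean = stats.getMean();
    // Worst case: one server carries everything, so the summed absolute
    // deviation from the mean is maximal.
    double worst = (total - mean) + (perServerLoad.length - 1) * mean;
    double deviation = 0;
    for (double v : perServerLoad) {
      deviation += Math.abs(v - mean);
    }
    return worst == 0 ? 0 : deviation / worst;
  }

  public static void main(String[] args) {
    System.out.println(scaledCost(new double[] { 5, 5, 5, 5 }));  // 0.0 (balanced)
    System.out.println(scaledCost(new double[] { 20, 0, 0, 0 })); // 1.0 (worst case)
  }
}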
@@ -93,7 +93,9 @@ public class ZKDataMigrator {
    * Gets table state from ZK.
    * @param zkw ZKWatcher instance to use
    * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
+   * @return Null or
+   * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State}
+   * found in znode.
    * @throws KeeperException
    * @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
    */
@@ -176,7 +176,7 @@ public abstract class HBaseTestCase extends TestCase {
   }
 
   /**
-   * Create a table of name <code>name</code> with {@link COLUMNS} for
+   * Create a table of name {@code name} with {@link #COLUMNS} for
    * families.
    * @param name Name to give table.
    * @return Column descriptor.
@@ -186,7 +186,7 @@ public abstract class HBaseTestCase extends TestCase {
   }
 
   /**
-   * Create a table of name <code>name</code> with {@link COLUMNS} for
+   * Create a table of name {@code name} with {@link #COLUMNS} for
    * families.
    * @param name Name to give table.
    * @param versions How many versions to allow per column.
@@ -199,7 +199,7 @@ public abstract class HBaseTestCase extends TestCase {
   }
 
   /**
-   * Create a table of name <code>name</code> with {@link COLUMNS} for
+   * Create a table of name {@code name} with {@link #COLUMNS} for
    * families.
    * @param name Name to give table.
    * @param versions How many versions to allow per column.
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 
 /**
- * {@link ClusterConnection} testing utility.
+ * {@link Connection} testing utility.
  */
 public class HConnectionTestingUtility {
   /*
@@ -48,11 +48,19 @@ public class HConnectionTestingUtility {
    * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access.
    */
   /**
+<<<<<<< HEAD
    * Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
    * configuration instance. Minimally the mock will return
    * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked.
    * Be sure to shutdown the connection when done by calling
    * {@link Connection#close()} else it will stick around; this is probably not what you want.
+=======
+   * Get a Mocked {@link Connection} that goes with the passed <code>conf</code>
+   * configuration instance. Minimally the mock will return <code>conf</conf> when
+   * {@link Connection#getConfiguration()} is invoked. Be sure to shutdown the
+   * connection when done by calling {@link Connection#close()} else it will stick around; this is
+   * probably not what you want.
+>>>>>>> fabf2b8282... HBASE-22572 Javadoc Warnings: @link reference not found (#306)
    * @param conf configuration
    * @return ClusterConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException
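Note that this hunk commits the cherry-pick's conflict markers verbatim, keeping both javadoc variants. The documented contract itself is simple; a hedged usage sketch (the getMockedConnection signature is assumed from the javadoc above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;

// Hedged sketch of the contract the javadoc documents.
public class MockedConnectionUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minimally, the mock returns conf from Connection#getConfiguration();
    // try-with-resources supplies the close() the javadoc insists on, so the
    // connection does not stick around.
    try (Connection connection = HConnectionTestingUtility.getMockedConnection(conf)) {
      assert connection.getConfiguration() == conf;
    }
  }
}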
@@ -53,7 +53,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Run Increment tests that use the HBase clients; {@link HTable}.
+ * Run Increment tests that use the HBase clients; {@link TableBuilder}.
  *
  * Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we
 * do a rolling restart of the single regionserver so that it can pick up the go fast configuration.
@@ -48,7 +48,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.HTable}.
+ * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.TableBuilder}.
  * Sets up the HBase mini cluster once at start and runs through all client tests.
  * Each creates a table named for the method and does its stuff against that.
  */
@@ -42,7 +42,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
- * Test the {@link MemStoreChunkPool} class
+ * Test the {@link org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool} class
  */
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestMemStoreChunkPool {
@@ -146,7 +146,7 @@ public class HBaseZKTestingUtility extends HBaseCommonTestingUtility {
   }
 
   /**
-   * Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)} or does nothing.
+   * Shuts down zk cluster created by call to {@link #startMiniZKCluster()} or does nothing.
    * @see #startMiniZKCluster()
    */
   public void shutdownMiniZKCluster() throws IOException {