HBASE-20478 Update checkstyle to v8.2

Cannot go to latest (8.9) yet due to
  https://github.com/checkstyle/checkstyle/issues/5279

* move hbaseanti import checks to checkstyle
* implement a few missing equals checks, and ignore one
* fix lots of javadoc errors

Signed-off-by: Sean Busbey <busbey@apache.org>
Mike Drob 2018-04-26 20:12:07 -05:00 committed by Sean Busbey
parent 8edd5d948a
commit b04c976fe6
42 changed files with 159 additions and 136 deletions


@ -661,24 +661,6 @@ function hbaseanti_patchfile
((result=result+1))
fi
warnings=$(${GREP} -c 'import org.apache.hadoop.classification' "${patchfile}")
if [[ ${warnings} -gt 0 ]]; then
add_vote_table -1 hbaseanti "" "The patch appears use Hadoop classification instead of HBase."
((result=result+1))
fi
warnings=$(${GREP} -c 'import org.codehaus.jackson' "${patchfile}")
if [[ ${warnings} -gt 0 ]]; then
add_vote_table -1 hbaseanti "" "The patch appears use Jackson 1 classes/annotations."
((result=result+1))
fi
warnings=$(${GREP} -cE 'org.apache.commons.logging.Log(Factory|;)' "${patchfile}")
if [[ ${warnings} -gt 0 ]]; then
add_vote_table -1 hbaseanti "" "The patch appears to use commons-logging instead of slf4j."
((result=result+1))
fi
if [[ ${result} -gt 0 ]]; then
return 1
fi

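For reference, a minimal (hypothetical) offender of the kind those removed greps used to catch; after this patch the same imports are reported by the checkstyle import restrictions added to checkstyle.xml below (presumably via the IllegalImport module, whose opening tag sits just outside the excerpted lines). Class and field names here are illustrative only, not part of the patch.

import org.apache.hadoop.classification.InterfaceAudience; // Hadoop classification instead of HBase's
import org.apache.commons.logging.Log;                     // commons-logging instead of slf4j
import org.apache.commons.logging.LogFactory;
import org.codehaus.jackson.map.ObjectMapper;              // Jackson 1 package

@InterfaceAudience.Private
public class BadImportsExample {
  private static final Log LOG = LogFactory.getLog(BadImportsExample.class);
  private final ObjectMapper mapper = new ObjectMapper();
}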

@ -37,4 +37,6 @@
<suppress checks="VisibilityModifier" files=".*/src/test/.*\.java"/>
<suppress checks="InterfaceIsTypeCheck" files=".*/src/main/.*\.java"/>
<suppress checks="MethodLength" files="Branch1CoprocessorMethods.java"/>
<suppress checks="EmptyBlockCheck" files="TBoundedThreadPoolServer.java"/>
<suppress checks="EqualsHashCode" files="StartcodeAgnosticServerName.java"/>
</suppressions>


@ -51,6 +51,7 @@
<module name="VisibilityModifier">
<property name="packageAllowed" value="true"/>
<property name="protectedAllowed" value="true"/>
<property name="allowPublicImmutableFields" value="true"/>
</module>
<!-- Coding Checks
@ -84,7 +85,12 @@
org.apache.commons.collections4,
org.apache.commons.lang,
org.apache.curator.shaded,
org.apache.htrace.shaded"/>
org.apache.hadoop.classification,
org.apache.htrace.shaded,
org.codehaus.jackson"/>
<property name="illegalClasses" value="
org.apache.commons.logging.Log,
org.apache.commons.logging.LogFactory"/>
</module>
<!-- Javadoc Checks
http://checkstyle.sourceforge.net/config_javadoc.html -->

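A hedged sketch of what the new VisibilityModifier property permits: with allowPublicImmutableFields set to true, a public final field of a type checkstyle treats as immutable (String here), declared in a final class, should no longer be flagged. The class below is illustrative and not part of the patch.

public final class ImmutableHostPort {
  public final String host; // public + final + immutable type: tolerated under the new setting
  public final String port; // kept as String to stay within checkstyle's default immutable-type list

  public ImmutableHostPort(String host, String port) {
    this.host = host;
    this.port = port;
  }
}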

@ -232,7 +232,7 @@ public interface ClusterConnection extends Connection {
/**
* Establishes a connection to the region server at the specified address.
* @param serverName
* @param serverName the region server to connect to
* @return proxy for HRegionServer
* @throws IOException if a remote or network exception occurs
*/
@ -242,7 +242,7 @@ public interface ClusterConnection extends Connection {
* Establishes a connection to the region server at the specified address, and returns
* a region client protocol.
*
* @param serverName
* @param serverName the region server to connect to
* @return ClientProtocol proxy for RegionServer
* @throws IOException if a remote or network exception occurs
*


@ -26,7 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience;
/**
* Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}.
* The cfs is a map of <ColumnFamily, ReplicationScope>.
* The cfs is a map of &lt;ColumnFamily, ReplicationScope>.
*/
@InterfaceAudience.Public
public class TableCFs {


@ -1561,7 +1561,7 @@ public final class ProtobufUtil {
/**
* @see {@link #buildGetServerInfoRequest()}
* @see #buildGetServerInfoRequest()
*/
private static GetServerInfoRequest GET_SERVER_INFO_REQUEST =
GetServerInfoRequest.newBuilder().build();


@ -84,7 +84,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* <tr><td>u.&lt;user&gt;</td><td>q:s</td><td>&lt;global-quotas&gt;</td></tr>
* <tr><td>u.&lt;user&gt;</td><td>q:s.&lt;table&gt;</td><td>&lt;table-quotas&gt;</td></tr>
* <tr><td>u.&lt;user&gt;</td><td>q:s.&lt;ns&gt;</td><td>&lt;namespace-quotas&gt;</td></tr>
* </table
* </table>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving


@ -1026,7 +1026,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildRollWALWriterRequest()}
* @see #buildRollWALWriterRequest()
*/
private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder()
.build();
@ -1040,7 +1040,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildGetServerInfoRequest()}
* @see #buildGetServerInfoRequest()
*/
private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder()
.build();
@ -1522,7 +1522,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildCatalogScanRequest}
* @see #buildCatalogScanRequest
*/
private static final RunCatalogScanRequest CATALOG_SCAN_REQUEST =
RunCatalogScanRequest.newBuilder().build();
@ -1544,7 +1544,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildIsCatalogJanitorEnabledRequest()}
* @see #buildIsCatalogJanitorEnabledRequest()
*/
private static final IsCatalogJanitorEnabledRequest IS_CATALOG_JANITOR_ENABLED_REQUEST =
IsCatalogJanitorEnabledRequest.newBuilder().build();
@ -1558,7 +1558,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildCleanerChoreRequest}
* @see #buildRunCleanerChoreRequest()
*/
private static final RunCleanerChoreRequest CLEANER_CHORE_REQUEST =
RunCleanerChoreRequest.newBuilder().build();
@ -1580,7 +1580,7 @@ public final class RequestConverter {
}
/**
* @see {@link #buildIsCleanerChoreEnabledRequest()}
* @see #buildIsCleanerChoreEnabledRequest()
*/
private static final IsCleanerChoreEnabledRequest IS_CLEANER_CHORE_ENABLED_REQUEST =
IsCleanerChoreEnabledRequest.newBuilder().build();


@ -26,7 +26,7 @@ import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
* An immutable type to hold a hostname and port combo, like an Endpoint
* or java.net.InetSocketAddress (but without danger of our calling
* resolve -- we do NOT want a resolve happening every time we want
* to hold a hostname and port combo). This class is also <<Comparable>>.
* to hold a hostname and port combo). This class is also {@link Comparable}
* <p>In implementation this class is a facade over Guava's {@link HostAndPort}.
* We cannot have Guava classes in our API hence this Type.
*/


@ -315,6 +315,20 @@ public abstract class AbstractByteRange implements ByteRange {
hash = UNSET_HASH_VALUE;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof ByteRange)) {
return false;
}
return compareTo((ByteRange) obj) == 0;
}
/**
* Bitwise comparison of each byte in the array. Unsigned comparison, not
* paying attention to java's signed bytes.

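A hedged usage sketch of the new equals(): it delegates to compareTo(), so two ranges over the same bytes are equal and hash identically. SimpleByteRange is assumed here as a concrete ByteRange implementation.

import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.SimpleByteRange;

public class ByteRangeEqualsDemo {
  public static void main(String[] args) {
    ByteRange a = new SimpleByteRange(new byte[] { 1, 2, 3 });
    ByteRange b = new SimpleByteRange(new byte[] { 1, 2, 3 });
    System.out.println(a.compareTo(b) == 0);          // true: same backing bytes
    System.out.println(a.equals(b));                  // true: equals() now routes through compareTo()
    System.out.println(a.hashCode() == b.hashCode()); // true: hash is computed over the same bytes
  }
}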

@ -30,12 +30,13 @@ import org.apache.hbase.thirdparty.com.google.common.escape.Escapers;
/**
* Utility class for converting objects to JRuby.
*
* It handles null, Boolean, Number, String, byte[], List<Object>, Map<String, Object> structures.
* It handles null, Boolean, Number, String, byte[], List&lt;Object>, Map&lt;String, Object>
* structures.
*
* <p>
* E.g.
* <pre>
* Map<String, Object> map = new LinkedHashMap<>();
* Map&lt;String, Object> map = new LinkedHashMap&lt;>();
* map.put("null", null);
* map.put("boolean", true);
* map.put("number", 1);
@ -48,7 +49,8 @@ import org.apache.hbase.thirdparty.com.google.common.escape.Escapers;
* <p>
* Calling {@link #print(Object)} method will result:
* <pre>
* { null => '', boolean => 'true', number => '1', string => 'str', binary => '010203', list => [ '1', '2', 'true' ] }
* { null => '', boolean => 'true', number => '1', string => 'str',
* binary => '010203', list => [ '1', '2', 'true' ] }
* </pre>
* </p>
*/


@ -339,7 +339,7 @@ public class OrderedBytes {
/**
* Perform unsigned comparison between two long values. Conforms to the same interface as
* {@link org.apache.hadoop.hbase.CellComparator#COMPARATOR#compare(Object, Object)}.
* {@link org.apache.hadoop.hbase.CellComparator}.
*/
private static int unsignedCmp(long x1, long x2) {
int cmp;

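As a quick illustration of the unsigned ordering that unsignedCmp documents (a generic sketch using the JDK, not the HBase method itself): interpreted as an unsigned 64-bit value, -1L is the maximum, so it sorts after every non-negative value.

public class UnsignedCmpDemo {
  public static void main(String[] args) {
    System.out.println(Long.compareUnsigned(-1L, 1L) > 0); // true: 0xFFFFFFFFFFFFFFFF is the largest unsigned value
    System.out.println(Long.compare(-1L, 1L) > 0);         // false: signed comparison puts -1 first
  }
}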

@ -83,7 +83,7 @@ public final class Waiter {
/**
* A predicate 'closure' used by the {@link Waiter#waitFor(Configuration, long, Predicate)} and
* {@link Waiter#waitFor(Configuration, long, Predicate)} and
* {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods.
* {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)} methods.
*/
@InterfaceAudience.Private
public interface Predicate<E extends Exception> {


@ -95,6 +95,9 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
done.run(null);
}
/**
* Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
convert(PrepareBulkLoadRequest request)
throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {
@ -121,8 +124,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
}
/**
* Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
* @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
* Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest
convert(CleanupBulkLoadRequest request)
@ -153,6 +155,9 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
}
/**
* Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest
convert(BulkLoadHFileRequest request)
throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {


@ -322,10 +322,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
* will be accessed as the passed user, by sending user.name request
* parameter.
*
* @param urlstring
* @param userName
* @return
* @throws IOException
* @param urlstring The url to access
* @param userName The user to perform access as
* @return The HTTP response code
* @throws IOException if there is a problem communicating with the server
*/
static int getHttpStatusCode(String urlstring, String userName)
throws IOException {


@ -38,10 +38,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
/**
* Base class for HBase integration tests that want to use the Chaos Monkey.
* Usage: bin/hbase <sub_class_of_IntegrationTestBase> <options>
* Usage: bin/hbase &lt;sub_class_of_IntegrationTestBase> &lt;options>
* Options: -h,--help Show usage
* -m,--monkey <arg> Which chaos monkey to run
* -monkeyProps <arg> The properties file for specifying chaos monkey properties.
* -m,--monkey &lt;arg> Which chaos monkey to run
* -monkeyProps &lt;arg> The properties file for specifying chaos monkey properties.
* -ncc Option to not clean up the cluster at the end.
*/
public abstract class IntegrationTestBase extends AbstractHBaseTool {


@ -50,7 +50,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
* with the replication of the edits before read_delay_ms to the given region replica id so that
* the read and verify will not fail.
*
* The job will run for <b>at least<b> given runtime (default 10min) by running a concurrent
* The job will run for <b>at least</b> given runtime (default 10min) by running a concurrent
* writer and reader workload followed by a concurrent updater and reader workload for
* num_keys_per_server.
* <p>


@ -107,8 +107,8 @@ public class TestTableInputFormat {
/**
* Setup a table with two rows and values.
*
* @param tableName
* @return
* @param tableName the name of the table to create
* @return A Table instance for the created table.
* @throws IOException
*/
public static Table createTable(byte[] tableName) throws IOException {
@ -119,7 +119,7 @@ public class TestTableInputFormat {
* Setup a table with two rows and values per column family.
*
* @param tableName
* @return
* @return A Table instance for the created table.
* @throws IOException
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {


@ -539,9 +539,9 @@ public class TestImportExport {
}
/**
* Count the number of keyvalues in the specified table for the given timerange
* @param table
* @return
* Count the number of keyvalues in the specified table with the given filter
* @param table the table to scan
* @return the number of keyvalues found
* @throws IOException
*/
private int getCount(Table table, Filter filter) throws IOException {


@ -105,7 +105,7 @@ public class TestTableInputFormat {
* Setup a table with two rows and values.
*
* @param tableName
* @return
* @return A Table instance for the created table.
* @throws IOException
*/
public static Table createTable(byte[] tableName) throws IOException {
@ -116,7 +116,7 @@ public class TestTableInputFormat {
* Setup a table with two rows and values per column family.
*
* @param tableName
* @return
* @return A Table instance for the created table.
* @throws IOException
*/
public static Table createTable(byte[] tableName, byte[][] families) throws IOException {


@ -820,10 +820,10 @@ public class LoadTestTool extends AbstractHBaseTool {
/**
* When NUM_TABLES is specified, the function starts multiple worker threads
* which individually start a LoadTestTool instance to load a table. Each
* table name is in format <tn>_<index>. For example, "-tn test -num_tables 2"
* table name is in format &lt;tn>_&lt;index>. For example, "-tn test -num_tables 2"
* , table names will be "test_1", "test_2"
*
* @throws IOException
* @throws IOException if one of the load tasks is unable to complete
*/
private int parallelLoadTables()
throws IOException {


@ -63,4 +63,8 @@ class StartcodeAgnosticServerName extends ServerName {
public int hashCode() {
return getHostAndPort().hashCode();
}
// Do not need @Override #equals() because super.equals() delegates to compareTo(), which ends
// up doing the right thing. We have a test for it, so the checkstyle warning here would be a
// false positive.
}

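A generic sketch of the pattern the comment above describes (illustrative classes, not HBase code): when a superclass defines equals() in terms of compareTo(), a subclass that only overrides compareTo() and hashCode() keeps equals() consistent without redefining it, which is why the EqualsHashCode warning is suppressed for StartcodeAgnosticServerName.

class ComparableBase implements Comparable<ComparableBase> {
  final String key;
  ComparableBase(String key) { this.key = key; }
  @Override public int compareTo(ComparableBase other) { return key.compareTo(other.key); }
  @Override public boolean equals(Object obj) {
    // equals() is defined once, in terms of compareTo()
    return obj instanceof ComparableBase && compareTo((ComparableBase) obj) == 0;
  }
  @Override public int hashCode() { return key.hashCode(); }
}

class CaseInsensitiveName extends ComparableBase {
  CaseInsensitiveName(String key) { super(key); }
  // Only compareTo() and hashCode() are overridden; the inherited equals() routes through the
  // overridden compareTo(), so equality follows the new ordering automatically.
  @Override public int compareTo(ComparableBase other) { return key.compareToIgnoreCase(other.key); }
  @Override public int hashCode() { return key.toLowerCase().hashCode(); }
}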

@ -1481,7 +1481,7 @@ public class HFileBlockIndex {
* The same as {@link #add(byte[], long, int, long)} but does not take the
* key/value into account. Used for single-level indexes.
*
* @see {@link #add(byte[], long, int, long)}
* @see #add(byte[], long, int, long)
*/
public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
add(firstKey, blockOffset, onDiskDataSize, -1);


@ -585,7 +585,7 @@ public abstract class RpcServer implements RpcServerInterface,
}
/**
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer).
* Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}.
* Only one of readCh or writeCh should be non-null.
*
* @param readCh read channel


@ -57,7 +57,7 @@ interface StoreFlushContext {
*
* A very short operation
*
* @return
* @return whether compaction is required
* @throws IOException
*/
boolean commit(MonitoredTask status) throws IOException;


@ -60,7 +60,7 @@ public interface ColumnTracker extends ShipperListener {
* method based on the return type (INCLUDE) of this method. The values that can be returned by
* this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and
* {@link MatchCode#SEEK_NEXT_ROW}.
* @param cell
* @param cell a cell with the column to match against
* @param type The type of the Cell
* @return The match code instance.
* @throws IOException in case there is an internal consistency problem caused by a data
@ -77,7 +77,7 @@ public interface ColumnTracker extends ShipperListener {
* Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in
* the {@link #checkColumn(Cell, byte)} method and perform all the operations in this
* checkVersions method.
* @param cell
* @param cell a cell with the column to match against
* @param timestamp The timestamp of the cell.
* @param type the type of the key value (Put/Delete)
* @param ignoreCount indicates if the KV needs to be excluded while counting (used during


@ -165,7 +165,7 @@ class AccessControlFilter extends FilterBase {
* @param pbBytes A pb serialized {@link AccessControlFilter} instance
* @return An instance of {@link AccessControlFilter} made from <code>bytes</code>
* @throws org.apache.hadoop.hbase.exceptions.DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray()
*/
public static AccessControlFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {


@ -590,7 +590,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Start a minidfscluster.
* @param servers How many DNs to start.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
@ -605,7 +605,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* datanodes will have the same host name.
* @param hosts hostnames DNs to run on.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(final String hosts[])
@ -623,7 +623,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param servers How many DNs to start.
* @param hosts hostnames DNs to run on.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
@ -767,7 +767,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Start up a minicluster of hbase, dfs, and zookeeper.
* @throws Exception
* @return Mini hbase cluster instance created.
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster() throws Exception {
return startMiniCluster(1, 1);
@ -777,7 +777,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
* @throws Exception
* @return Mini hbase cluster instance created.
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);
@ -789,7 +789,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* (will overwrite if dir already exists)
* @throws Exception
* @return Mini hbase cluster instance created.
* @see {@link #shutdownMiniDFSCluster()}
* @see #shutdownMiniDFSCluster()
*/
public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
throws Exception {
@ -806,7 +806,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numSlaves)
@ -823,7 +823,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* Start minicluster. Whether to create a new root or data dir path even if such a path
* has been created earlier is decided based on flag <code>create</code>
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@ -835,7 +835,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* start minicluster
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@ -872,7 +872,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* If you start MiniDFSCluster without host names,
* all instances of the datanodes will have the same host name.
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@ -914,7 +914,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @param regionserverClass The class to use as HRegionServer, or null for
* default
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @see #shutdownMiniCluster()
* @return Mini hbase cluster instance created.
*/
public MiniHBaseCluster startMiniCluster(final int numMasters,
@ -1003,7 +1003,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
* @return Reference to the hbase mini hbase cluster.
* @throws IOException
* @throws InterruptedException
* @see {@link #startMiniCluster()}
* @see #startMiniCluster()
*/
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,
@ -1088,7 +1088,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Stops mini hbase, zk, and hdfs clusters.
* @throws IOException
* @see {@link #startMiniCluster(int)}
* @see #startMiniCluster(int)
*/
public void shutdownMiniCluster() throws Exception {
LOG.info("Shutting down minicluster");
@ -1746,10 +1746,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Create an HRegion that writes to the local tmp dirs
* @param desc
* @param startKey
* @param endKey
* @return
* @param desc a table descriptor indicating which table the region belongs to
* @param startKey the start boundary of the region
* @param endKey the end boundary of the region
* @return a region that writes to local dir for testing
* @throws IOException
*/
public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,


@ -163,26 +163,25 @@ public abstract class MultithreadedTestUtil {
* Verify that no assertions have failed inside a future.
* Used for unit tests that spawn threads. E.g.,
* <p>
* <code>
* List<Future<Void>> results = Lists.newArrayList();
* Future<Void> f = executor.submit(new Callable<Void> {
* <pre>
* List&lt;Future&lt;Void>> results = Lists.newArrayList();
* Future&lt;Void> f = executor.submit(new Callable&lt;Void> {
* public Void call() {
* assertTrue(someMethod());
* }
* });
* results.add(f);
* assertOnFutures(results);
* </code>
* </pre>
* @param threadResults A list of futures
* @param <T>
* @throws InterruptedException If interrupted when waiting for a result
* from one of the futures
* @throws ExecutionException If an exception other than AssertionError
* occurs inside any of the futures
*/
public static <T> void assertOnFutures(List<Future<T>> threadResults)
public static void assertOnFutures(List<Future<?>> threadResults)
throws InterruptedException, ExecutionException {
for (Future<T> threadResult : threadResults) {
for (Future<?> threadResult : threadResults) {
try {
threadResult.get();
} catch (ExecutionException e) {

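A hedged usage sketch for the relaxed signature above: because the parameter is now List<Future<?>>, the caller declares its list with the wildcard and can mix futures of different result types. The executor setup is illustrative; the commented call marks where assertOnFutures would slot in.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AssertOnFuturesDemo {
  public static void main(String[] args) throws InterruptedException, ExecutionException {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    List<Future<?>> results = new ArrayList<>();                             // wildcard, matching the new signature
    results.add(executor.submit(() -> { assert 1 + 1 == 2; return null; })); // Callable<Object>
    results.add(executor.submit(() -> "done"));                              // Callable<String> in the same list
    // MultithreadedTestUtil.assertOnFutures(results);                       // the method under discussion
    for (Future<?> f : results) {
      f.get();                                                               // rethrows failures as ExecutionException
    }
    executor.shutdown();
  }
}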

@ -122,7 +122,7 @@ public class TestMetaTableAccessorNoCluster {
/**
* Test that MetaTableAccessor will ride over server throwing
* "Server not running" IOEs.
* @see @link {https://issues.apache.org/jira/browse/HBASE-3446}
* @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
* @throws IOException
* @throws InterruptedException
*/


@ -603,12 +603,12 @@ public class TestPartialResultsFromClientSide {
/**
* Make puts to put the input value into each combination of row, family, and qualifier
* @param rows
* @param families
* @param qualifiers
* @param value
* @return
* @throws IOException
* @param rows the rows to use
* @param families the families to use
* @param qualifiers the qualifiers to use
* @param value the values to use
* @return the dot product of the given rows, families, qualifiers, and values
* @throws IOException if there is a problem creating one of the Put objects
*/
static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
byte[] value) throws IOException {
@ -632,11 +632,11 @@ public class TestPartialResultsFromClientSide {
/**
* Make key values to represent each possible combination of family and qualifier in the specified
* row.
* @param row
* @param families
* @param qualifiers
* @param value
* @return
* @param row the row to use
* @param families the families to use
* @param qualifiers the qualifiers to use
* @param value the values to use
* @return the dot product of the given families, qualifiers, and values for a given row
*/
static ArrayList<Cell> createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers,
byte[] value) {
@ -772,9 +772,9 @@ public class TestPartialResultsFromClientSide {
/**
* Exhausts the scanner by calling next repetitively. Once completely exhausted, close scanner and
* return total cell count
* @param scanner
* @return
* @throws Exception
* @param scanner the scanner to exhaust
* @return the number of cells counted
* @throws Exception if there is a problem retrieving cells from the scanner
*/
private int countCellsFromScanner(ResultScanner scanner) throws Exception {
Result result = null;


@ -424,9 +424,9 @@ public class TestHFileArchiving {
/**
* Get the names of all the files below the given directory
* @param fs
* @param archiveDir
* @return
* @param fs the file system to inspect
* @param archiveDir the directory in which to look
* @return a list of all files in the directory and sub-directories
* @throws IOException
*/
private List<String> getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException {


@ -50,7 +50,7 @@ public class HConnectionTestingUtility {
/**
* Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
* configuration instance. Minimally the mock will return
* <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked.
* &lt;code>conf&lt;/conf> when {@link ClusterConnection#getConfiguration()} is invoked.
* Be sure to shutdown the connection when done by calling
* {@link Connection#close()} else it will stick around; this is probably not what you want.
* @param conf configuration
@ -148,7 +148,7 @@ public class HConnectionTestingUtility {
* @param conf configuration
* @return ClusterConnection object for <code>conf</code>
* @throws ZooKeeperConnectionException
* @see @link
* [Dead link]: See also
* {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
*/
public static ClusterConnection getSpiedConnection(final Configuration conf)


@ -24,9 +24,15 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -91,6 +97,24 @@ class StringRange {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof StringRange)) {
return false;
}
StringRange oth = (StringRange) obj;
return this.startInclusive == oth.startInclusive &&
this.endInclusive == oth.endInclusive &&
Objects.equals(this.start, oth.start) &&
Objects.equals(this.end, oth.end);
}
@Override
public String toString() {
String result = (this.startInclusive ? "[" : "(")
@ -133,33 +157,21 @@ public class TestColumnRangeFilter {
@Rule
public TestName name = new TestName();
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.


@ -22,7 +22,7 @@ import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparator;
/**
* Generate random <key, value> pairs.
* Generate random &lt;key, value> pairs.
* <p>
* Copied from
* <a href="https://issues.apache.org/jira/browse/HADOOP-3315">hadoop-3315 tfile</a>.


@ -401,7 +401,7 @@ public class TestRegionPlacement {
/**
* Verify the number of user regions is assigned to the primary
* region server based on the plan is expected
* @param expectedNum.
* @param expectedNum the expected number of assigned regions
* @throws IOException
*/
private void verifyRegionOnPrimaryRS(int expectedNum)
@ -536,9 +536,8 @@ public class TestRegionPlacement {
/**
* Create a table with specified table name and region number.
* @param tablename
* @param regionNum
* @return
* @param tableName the name of the table to be created
* @param regionNum number of regions to create
* @throws IOException
*/
private static void createTable(TableName tableName, int regionNum)


@ -124,9 +124,7 @@ public class TestCompoundBloomFilter {
private FileSystem fs;
private BlockCache blockCache;
/**
* A message of the form <code>in test#&lt;number&gt;</code>" to include in logging.
*/
/** A message of the form "in test#&lt;number>:" to include in logging. */
private String testIdMsg;
private static final int GENERATION_SEED = 2319;


@ -5925,7 +5925,7 @@ public class TestHRegion {
/**
* Utility method to setup a WAL mock.
* Needs to do the bit where we close latch on the WALKeyImpl on append else test hangs.
* @return
* @return a mock WAL
* @throws IOException
*/
private WAL mockWAL() throws IOException {


@ -274,9 +274,9 @@ public final class SnapshotTestingUtils {
* Helper method for testing async snapshot operations. Just waits for the
* given snapshot to complete on the server by repeatedly checking the master.
*
* @param master: the master running the snapshot
* @param snapshot: the snapshot to check
* @param sleep: amount to sleep between checks to see if the snapshot is done
* @param master the master running the snapshot
* @param snapshot the snapshot to check
* @param sleep amount to sleep between checks to see if the snapshot is done
* @throws ServiceException if the snapshot fails
* @throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
*/
@ -357,7 +357,7 @@ public final class SnapshotTestingUtils {
/**
* List all the HFiles in the given table
*
* @param fs: FileSystem where the table lives
* @param fs FileSystem where the table lives
* @param tableDir directory of the table
* @return array of the current HFiles in the table (could be a zero-length array)
* @throws IOException on unexecpted error reading the FS


@ -140,7 +140,7 @@ public class HFileArchiveTestingUtil {
}
/**
* @return <expected, gotten, backup>, where each is sorted
* @return &lt;expected, gotten, backup&gt;, where each is sorted
*/
private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
List<List<String>> files = new ArrayList<>(3);


@ -423,7 +423,7 @@ public class TestRegionSplitter {
}
/**
* List.indexOf() doesn't really work for a List &lt;byte[]&gt;, because byte[]
* List.indexOf() doesn't really work for a List&lt;byte[]>, because byte[]
* doesn't override equals(). This method checks whether a list contains
* a given element by checking each element using the byte array
* comparator.


@ -1388,7 +1388,7 @@
<asciidoctorj.pdf.version>1.5.0-alpha.15</asciidoctorj.pdf.version>
<build.helper.maven.version>3.0.0</build.helper.maven.version>
<buildnumber.maven.version>1.4</buildnumber.maven.version>
<checkstyle.version>6.18</checkstyle.version>
<checkstyle.version>8.2</checkstyle.version>
<exec.maven.version>1.6.0</exec.maven.version>
<error-prone.version>2.2.0</error-prone.version>
<findbugs-annotations>1.3.9-1</findbugs-annotations>
@ -1397,7 +1397,7 @@
<lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
<maven.antrun.version>1.8</maven.antrun.version>
<maven.bundle.version>3.3.0</maven.bundle.version>
<maven.checkstyle.version>2.17</maven.checkstyle.version>
<maven.checkstyle.version>3.0.0</maven.checkstyle.version>
<maven.compiler.version>3.6.1</maven.compiler.version>
<maven.dependency.version>3.0.1</maven.dependency.version>
<maven.eclipse.version>2.10</maven.eclipse.version>