HBASE-20478 Update checkstyle to v8.2

Cannot go to the latest (8.9) yet due to
https://github.com/checkstyle/checkstyle/issues/5279

* move hbaseanti import checks to checkstyle
* implement a few missing equals checks, and ignore one
* fix lots of javadoc errors

Signed-off-by: Sean Busbey <busbey@apache.org>

parent 42be553433
commit a110e1eff5

@@ -631,24 +631,6 @@ function hbaseanti_patchfile
     ((result=result+1))
   fi

-  warnings=$(${GREP} -c 'import org.apache.hadoop.classification' "${patchfile}")
-  if [[ ${warnings} -gt 0 ]]; then
-    add_vote_table -1 hbaseanti "" "The patch appears use Hadoop classification instead of HBase."
-    ((result=result+1))
-  fi
-
-  warnings=$(${GREP} -c 'import org.codehaus.jackson' "${patchfile}")
-  if [[ ${warnings} -gt 0 ]]; then
-    add_vote_table -1 hbaseanti "" "The patch appears use Jackson 1 classes/annotations."
-    ((result=result+1))
-  fi
-
-  warnings=$(${GREP} -cE 'org.apache.commons.logging.Log(Factory|;)' "${patchfile}")
-  if [[ ${warnings} -gt 0 ]]; then
-    add_vote_table -1 hbaseanti "" "The patch appears to use commons-logging instead of slf4j."
-    ((result=result+1))
-  fi
-
   if [[ ${result} -gt 0 ]]; then
     return 1
   fi

@@ -77,7 +77,7 @@ public class RestoreTablesClient {
   /**
    * Validate target tables.
    *
-   * @param tTableArray: target tables
+   * @param tTableArray target tables
    * @param isOverwrite overwrite existing table
    * @throws IOException exception
    */

@@ -123,10 +123,10 @@ public class RestoreTablesClient {
   /**
    * Restore operation handle each backupImage in array.
    *
-   * @param images: array BackupImage
-   * @param sTable: table to be restored
-   * @param tTable: table to be restored to
-   * @param truncateIfExists: truncate table
+   * @param images array BackupImage
+   * @param sTable table to be restored
+   * @param tTable table to be restored to
+   * @param truncateIfExists truncate table
    * @throws IOException exception
    */

@@ -37,4 +37,5 @@
   <suppress checks="VisibilityModifier" files=".*/src/test/.*\.java"/>
   <suppress checks="InterfaceIsTypeCheck" files=".*/src/main/.*\.java"/>
   <suppress checks="EmptyBlockCheck" files="TBoundedThreadPoolServer.java"/>
+  <suppress checks="EqualsHashCode" files="StartcodeAgnosticServerName.java"/>
 </suppressions>

@@ -51,6 +51,7 @@
     <module name="VisibilityModifier">
       <property name="packageAllowed" value="true"/>
       <property name="protectedAllowed" value="true"/>
+      <property name="allowPublicImmutableFields" value="true"/>
     </module>

     <!-- Coding Checks

@@ -85,7 +86,12 @@
                   org.apache.commons.collections4,
                   org.apache.commons.lang,
                   org.apache.curator.shaded,
-                  org.apache.htrace.shaded"/>
+                  org.apache.hadoop.classification,
+                  org.apache.htrace.shaded,
+                  org.codehaus.jackson"/>
+      <property name="illegalClasses" value="
+                  org.apache.commons.logging.Log,
+                  org.apache.commons.logging.LogFactory"/>
     </module>
     <!-- Javadoc Checks
          http://checkstyle.sourceforge.net/config_javadoc.html -->
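
Note: the IllegalImport additions above take over what the grep-based "hbaseanti" checks deleted from the shell personality at the top of this diff used to catch, so the same anti-patterns now fail precommit via checkstyle. A minimal sketch of imports the updated configuration would now flag (real classes from the banned packages, shown purely for illustration):

    import org.apache.hadoop.classification.InterfaceAudience; // banned pkg: use HBase's own annotations
    import org.codehaus.jackson.map.ObjectMapper;              // banned pkg: Jackson 1
    import org.apache.commons.logging.Log;                     // banned class: use slf4j
    import org.apache.commons.logging.LogFactory;              // banned class: use slf4j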

@@ -232,7 +232,7 @@ public interface ClusterConnection extends Connection {

   /**
    * Establishes a connection to the region server at the specified address.
-   * @param serverName
+   * @param serverName the region server to connect to
    * @return proxy for HRegionServer
    * @throws IOException if a remote or network exception occurs
    */

@@ -242,7 +242,7 @@ public interface ClusterConnection extends Connection {
    * Establishes a connection to the region server at the specified address, and returns
    * a region client protocol.
    *
-   * @param serverName
+   * @param serverName the region server to connect to
    * @return ClientProtocol proxy for RegionServer
    * @throws IOException if a remote or network exception occurs
    *

@@ -26,7 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience;

 /**
  * Used by {@link org.apache.hadoop.hbase.client.Admin#listReplicatedTableCFs()}.
- * The cfs is a map of <ColumnFamily, ReplicationScope>.
+ * The cfs is a map of &lt;ColumnFamily, ReplicationScope&gt;.
  */
 @InterfaceAudience.Public
 public class TableCFs {

@@ -1559,7 +1559,7 @@ public final class ProtobufUtil {


   /**
-   * @see {@link #buildGetServerInfoRequest()}
+   * @see #buildGetServerInfoRequest()
    */
   private static GetServerInfoRequest GET_SERVER_INFO_REQUEST =
       GetServerInfoRequest.newBuilder().build();
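
Note: a large share of the javadoc fixes in this diff follow one rule: @see already produces a link, so wrapping the target in {@link ...} is invalid and the javadoc checks in checkstyle 8.2 reject it. A minimal sketch (buildFoo is a hypothetical method name):

    /**
     * @see #buildFoo()
     */
    // not: @see {@link #buildFoo()}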

@@ -86,7 +86,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * <tr><td>u.<user></td><td>q:s</td><td><global-quotas></td></tr>
  * <tr><td>u.<user></td><td>q:s.<table></td><td><table-quotas></td></tr>
  * <tr><td>u.<user></td><td>q:s.<ns></td><td><namespace-quotas></td></tr>
- * </table
+ * </table>
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

@@ -1026,7 +1026,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildRollWALWriterRequest()}
+   * @see #buildRollWALWriterRequest()
    */
   private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder()
       .build();

@@ -1040,7 +1040,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildGetServerInfoRequest()}
+   * @see #buildGetServerInfoRequest()
    */
   private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder()
       .build();

@@ -1522,7 +1522,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildCatalogScanRequest}
+   * @see #buildCatalogScanRequest
    */
   private static final RunCatalogScanRequest CATALOG_SCAN_REQUEST =
       RunCatalogScanRequest.newBuilder().build();

@@ -1544,7 +1544,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildIsCatalogJanitorEnabledRequest()}
+   * @see #buildIsCatalogJanitorEnabledRequest()
    */
   private static final IsCatalogJanitorEnabledRequest IS_CATALOG_JANITOR_ENABLED_REQUEST =
       IsCatalogJanitorEnabledRequest.newBuilder().build();

@@ -1558,7 +1558,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildCleanerChoreRequest}
+   * @see #buildRunCleanerChoreRequest()
    */
   private static final RunCleanerChoreRequest CLEANER_CHORE_REQUEST =
       RunCleanerChoreRequest.newBuilder().build();

@@ -1580,7 +1580,7 @@ public final class RequestConverter {
   }

   /**
-   * @see {@link #buildIsCleanerChoreEnabledRequest()}
+   * @see #buildIsCleanerChoreEnabledRequest()
    */
   private static final IsCleanerChoreEnabledRequest IS_CLEANER_CHORE_ENABLED_REQUEST =
       IsCleanerChoreEnabledRequest.newBuilder().build();

@@ -26,7 +26,7 @@ import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
  * An immutable type to hold a hostname and port combo, like an Endpoint
  * or java.net.InetSocketAddress (but without danger of our calling
  * resolve -- we do NOT want a resolve happening every time we want
- * to hold a hostname and port combo). This class is also <<Comparable>>.
+ * to hold a hostname and port combo). This class is also {@link Comparable}
  * <p>In implementation this class is a facade over Guava's {@link HostAndPort}.
  * We cannot have Guava classes in our API hence this Type.
  */

@@ -315,6 +315,20 @@ public abstract class AbstractByteRange implements ByteRange {
     hash = UNSET_HASH_VALUE;
   }

+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof ByteRange)) {
+      return false;
+    }
+    return compareTo((ByteRange) obj) == 0;
+  }
+
   /**
    * Bitwise comparison of each byte in the array. Unsigned comparison, not
    * paying attention to java's signed bytes.
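
Note: the new equals() delegates to compareTo(ByteRange), so equality is defined by byte content rather than object identity, consistent with the existing compareTo and hashCode. A hedged sketch of the resulting behavior (SimpleMutableByteRange assumed here as a concrete ByteRange implementation):

    ByteRange a = new SimpleMutableByteRange(new byte[] { 1, 2, 3 });
    ByteRange b = new SimpleMutableByteRange(new byte[] { 1, 2, 3 });
    assert a.equals(b);         // equal contents compare equal
    assert a.compareTo(b) == 0; // equals and compareTo now agree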

@@ -30,12 +30,13 @@ import org.apache.hbase.thirdparty.com.google.common.escape.Escapers;
 /**
  * Utility class for converting objects to JRuby.
  *
- * It handles null, Boolean, Number, String, byte[], List<Object>, Map<String, Object> structures.
+ * It handles null, Boolean, Number, String, byte[], List<Object>, Map<String, Object>
+ * structures.
  *
  * <p>
  * E.g.
  * <pre>
  * Map<String, Object> map = new LinkedHashMap<>();
  * map.put("null", null);
  * map.put("boolean", true);
  * map.put("number", 1);

@@ -48,7 +49,8 @@ import org.apache.hbase.thirdparty.com.google.common.escape.Escapers;
  * <p>
  * Calling {@link #print(Object)} method will result:
  * <pre>
- * { null => '', boolean => 'true', number => '1', string => 'str', binary => '010203', list => [ '1', '2', 'true' ] }
+ * { null => '', boolean => 'true', number => '1', string => 'str',
+ *   binary => '010203', list => [ '1', '2', 'true' ] }
  * </pre>
  * </p>
  */

@@ -339,7 +339,7 @@ public class OrderedBytes {

   /**
    * Perform unsigned comparison between two long values. Conforms to the same interface as
-   * {@link org.apache.hadoop.hbase.CellComparator#COMPARATOR#compare(Object, Object)}.
+   * {@link org.apache.hadoop.hbase.CellComparator}.
    */
   private static int unsignedCmp(long x1, long x2) {
     int cmp;
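
Note: only the javadoc changes here, but the technique the method implements is worth a sketch: unsigned comparison of two signed longs. The actual method body is not shown in this hunk; both lines below are standard equivalents:

    int viaLibrary = Long.compareUnsigned(x1, x2);                       // Java 8+
    int byHand = Long.compare(x1 ^ Long.MIN_VALUE, x2 ^ Long.MIN_VALUE); // flip sign bit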

@@ -83,7 +83,7 @@ public final class Waiter {
   /**
    * A predicate 'closure' used by the {@link Waiter#waitFor(Configuration, long, Predicate)} and
    * {@link Waiter#waitFor(Configuration, long, Predicate)} and
-   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods.
+   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)} methods.
    */
   @InterfaceAudience.Private
   public interface Predicate<E extends Exception> {
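
Note: a hedged sketch of the Predicate "closure" usage this javadoc describes, per the waitFor overloads named above (the polled condition is hypothetical):

    Waiter.waitFor(conf, 30000, new Waiter.Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        return clusterIsUp(); // hypothetical condition, polled until true or timeout
      }
    });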

@@ -95,6 +95,9 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
     done.run(null);
   }

+  /**
+   * Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
+   */
   org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
       convert(PrepareBulkLoadRequest request)
   throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {

@@ -121,8 +124,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
   }

   /**
    * Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
-   * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
    */
   org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest
       convert(CleanupBulkLoadRequest request)

@@ -153,6 +155,9 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg
     done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
   }

+  /**
+   * Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
+   */
   org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest
       convert(BulkLoadHFileRequest request)
   throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {

@@ -322,10 +322,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    * will be accessed as the passed user, by sending user.name request
    * parameter.
    *
-   * @param urlstring
-   * @param userName
-   * @return
-   * @throws IOException
+   * @param urlstring The url to access
+   * @param userName The user to perform access as
+   * @return The HTTP response code
+   * @throws IOException if there is a problem communicating with the server
    */
   static int getHttpStatusCode(String urlstring, String userName)
       throws IOException {

@@ -38,10 +38,10 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;

 /**
  * Base class for HBase integration tests that want to use the Chaos Monkey.
- * Usage: bin/hbase <sub_class_of_IntegrationTestBase> <options>
+ * Usage: bin/hbase &lt;sub_class_of_IntegrationTestBase&gt; &lt;options&gt;
  * Options: -h,--help Show usage
- * -m,--monkey <arg> Which chaos monkey to run
- * -monkeyProps <arg> The properties file for specifying chaos monkey properties.
+ * -m,--monkey &lt;arg&gt; Which chaos monkey to run
+ * -monkeyProps &lt;arg&gt; The properties file for specifying chaos monkey properties.
  * -ncc Option to not clean up the cluster at the end.
  */
 public abstract class IntegrationTestBase extends AbstractHBaseTool {

@@ -50,7 +50,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
  * with the replication of the edits before read_delay_ms to the given region replica id so that
  * the read and verify will not fail.
  *
- * The job will run for <b>at least<b> given runtime (default 10min) by running a concurrent
+ * The job will run for <b>at least</b> given runtime (default 10min) by running a concurrent
  * writer and reader workload followed by a concurrent updater and reader workload for
  * num_keys_per_server.
  * <p>

@@ -107,8 +107,8 @@ public class TestTableInputFormat {
   /**
    * Setup a table with two rows and values.
    *
-   * @param tableName
-   * @return
+   * @param tableName the name of the table to create
+   * @return A Table instance for the created table.
    * @throws IOException
    */
   public static Table createTable(byte[] tableName) throws IOException {

@@ -119,7 +119,7 @@ public class TestTableInputFormat {
    * Setup a table with two rows and values per column family.
    *
    * @param tableName
-   * @return
+   * @return A Table instance for the created table.
    * @throws IOException
    */
   public static Table createTable(byte[] tableName, byte[][] families) throws IOException {

@@ -537,9 +537,9 @@ public class TestImportExport {
   }

   /**
-   * Count the number of keyvalues in the specified table for the given timerange
-   * @param table
-   * @return
+   * Count the number of keyvalues in the specified table with the given filter
+   * @param table the table to scan
+   * @return the number of keyvalues found
    * @throws IOException
    */
   private int getCount(Table table, Filter filter) throws IOException {

@@ -105,7 +105,7 @@ public class TestTableInputFormat {
    * Setup a table with two rows and values.
    *
    * @param tableName
-   * @return
+   * @return A Table instance for the created table.
    * @throws IOException
    */
   public static Table createTable(byte[] tableName) throws IOException {

@@ -116,7 +116,7 @@ public class TestTableInputFormat {
    * Setup a table with two rows and values per column family.
    *
    * @param tableName
-   * @return
+   * @return A Table instance for the created table.
    * @throws IOException
    */
   public static Table createTable(byte[] tableName, byte[][] families) throws IOException {

@@ -820,10 +820,10 @@ public class LoadTestTool extends AbstractHBaseTool {
   /**
    * When NUM_TABLES is specified, the function starts multiple worker threads
    * which individually start a LoadTestTool instance to load a table. Each
    * table name is in format <tn>_<index>. For example, "-tn test -num_tables 2"
    * , table names will be "test_1", "test_2"
    *
-   * @throws IOException
+   * @throws IOException if one of the load tasks is unable to complete
    */
   private int parallelLoadTables()
       throws IOException {

@@ -63,4 +63,8 @@ class StartcodeAgnosticServerName extends ServerName {
   public int hashCode() {
     return getHostAndPort().hashCode();
   }
+
+  // Do not need @Override #equals() because super.equals() delegates to compareTo(), which ends
+  // up doing the right thing. We have a test for it, so the checkstyle warning here would be a
+  // false positive.
 }

@@ -1481,7 +1481,7 @@ public class HFileBlockIndex {
    * The same as {@link #add(byte[], long, int, long)} but does not take the
    * key/value into account. Used for single-level indexes.
    *
-   * @see {@link #add(byte[], long, int, long)}
+   * @see #add(byte[], long, int, long)
    */
   public void add(byte[] firstKey, long blockOffset, int onDiskDataSize) {
     add(firstKey, blockOffset, onDiskDataSize, -1);

@@ -585,7 +585,7 @@ public abstract class RpcServer implements RpcServerInterface,
   }

   /**
-   * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer).
+   * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}.
    * Only one of readCh or writeCh should be non-null.
    *
    * @param readCh read channel

@@ -57,7 +57,7 @@ interface StoreFlushContext {
    *
    * A very short operation
    *
-   * @return
+   * @return whether compaction is required
    * @throws IOException
    */
   boolean commit(MonitoredTask status) throws IOException;

@@ -60,7 +60,7 @@ public interface ColumnTracker extends ShipperListener {
    * method based on the return type (INCLUDE) of this method. The values that can be returned by
    * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and
    * {@link MatchCode#SEEK_NEXT_ROW}.
-   * @param cell
+   * @param cell a cell with the column to match against
    * @param type The type of the Cell
    * @return The match code instance.
    * @throws IOException in case there is an internal consistency problem caused by a data

@@ -77,7 +77,7 @@ public interface ColumnTracker extends ShipperListener {
    * Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in
    * the {@link #checkColumn(Cell, byte)} method and perform all the operations in this
    * checkVersions method.
-   * @param cell
+   * @param cell a cell with the column to match against
    * @param timestamp The timestamp of the cell.
    * @param type the type of the key value (Put/Delete)
    * @param ignoreCount indicates if the KV needs to be excluded while counting (used during

@@ -165,7 +165,7 @@ class AccessControlFilter extends FilterBase {
    * @param pbBytes A pb serialized {@link AccessControlFilter} instance
    * @return An instance of {@link AccessControlFilter} made from <code>bytes</code>
    * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-   * @see {@link #toByteArray()}
+   * @see #toByteArray()
    */
   public static AccessControlFilter parseFrom(final byte [] pbBytes)
       throws DeserializationException {

@@ -590,7 +590,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * Start a minidfscluster.
    * @param servers How many DNs to start.
    * @throws Exception
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    * @return The mini dfs cluster created.
    */
   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {

@@ -605,7 +605,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * datanodes will have the same host name.
    * @param hosts hostnames DNs to run on.
    * @throws Exception
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    * @return The mini dfs cluster created.
    */
   public MiniDFSCluster startMiniDFSCluster(final String hosts[])

@@ -623,7 +623,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * @param servers How many DNs to start.
    * @param hosts hostnames DNs to run on.
    * @throws Exception
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    * @return The mini dfs cluster created.
    */
   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])

@@ -767,7 +767,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * Start up a minicluster of hbase, dfs, and zookeeper.
    * @throws Exception
    * @return Mini hbase cluster instance created.
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    */
   public MiniHBaseCluster startMiniCluster() throws Exception {
     return startMiniCluster(1, 1);

@@ -777,7 +777,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
    * @throws Exception
    * @return Mini hbase cluster instance created.
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    */
   public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
     return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);

@@ -789,7 +789,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * (will overwrite if dir already exists)
    * @throws Exception
    * @return Mini hbase cluster instance created.
-   * @see {@link #shutdownMiniDFSCluster()}
+   * @see #shutdownMiniDFSCluster()
    */
   public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
       throws Exception {

@@ -806,7 +806,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
    * bind errors.
    * @throws Exception
-   * @see {@link #shutdownMiniCluster()}
+   * @see #shutdownMiniCluster()
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numSlaves)

@@ -823,7 +823,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * Start minicluster. Whether to create a new root or data dir path even if such a path
    * has been created earlier is decided based on flag <code>create</code>
    * @throws Exception
-   * @see {@link #shutdownMiniCluster()}
+   * @see #shutdownMiniCluster()
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numMasters,

@@ -835,7 +835,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   /**
    * start minicluster
    * @throws Exception
-   * @see {@link #shutdownMiniCluster()}
+   * @see #shutdownMiniCluster()
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numMasters,

@@ -872,7 +872,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * If you start MiniDFSCluster without host names,
    * all instances of the datanodes will have the same host name.
    * @throws Exception
-   * @see {@link #shutdownMiniCluster()}
+   * @see #shutdownMiniCluster()
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numMasters,

@@ -914,7 +914,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * @param regionserverClass The class to use as HRegionServer, or null for
    * default
    * @throws Exception
-   * @see {@link #shutdownMiniCluster()}
+   * @see #shutdownMiniCluster()
    * @return Mini hbase cluster instance created.
    */
   public MiniHBaseCluster startMiniCluster(final int numMasters,

@@ -1003,7 +1003,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    * @return Reference to the hbase mini hbase cluster.
    * @throws IOException
    * @throws InterruptedException
-   * @see {@link #startMiniCluster()}
+   * @see #startMiniCluster()
    */
   public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
       final int numSlaves, List<Integer> rsPorts, Class<? extends HMaster> masterClass,

@@ -1088,7 +1088,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   /**
    * Stops mini hbase, zk, and hdfs clusters.
    * @throws IOException
-   * @see {@link #startMiniCluster(int)}
+   * @see #startMiniCluster(int)
    */
   public void shutdownMiniCluster() throws Exception {
     LOG.info("Shutting down minicluster");

@@ -1746,10 +1746,10 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {

   /**
    * Create an HRegion that writes to the local tmp dirs
-   * @param desc
-   * @param startKey
-   * @param endKey
-   * @return
+   * @param desc a table descriptor indicating which table the region belongs to
+   * @param startKey the start boundary of the region
+   * @param endKey the end boundary of the region
+   * @return a region that writes to local dir for testing
    * @throws IOException
    */
   public HRegion createLocalHRegion(TableDescriptor desc, byte [] startKey,

@@ -163,26 +163,25 @@ public abstract class MultithreadedTestUtil {
    * Verify that no assertions have failed inside a future.
    * Used for unit tests that spawn threads. E.g.,
    * <p>
-   * <code>
+   * <pre>
    *   List<Future<Void>> results = Lists.newArrayList();
    *   Future<Void> f = executor.submit(new Callable<Void> {
    *     public Void call() {
    *       assertTrue(someMethod());
    *     }
    *   });
    *   results.add(f);
    *   assertOnFutures(results);
-   * </code>
+   * </pre>
    * @param threadResults A list of futures
-   * @param <T>
    * @throws InterruptedException If interrupted when waiting for a result
    *   from one of the futures
    * @throws ExecutionException If an exception other than AssertionError
    *   occurs inside any of the futures
    */
-  public static <T> void assertOnFutures(List<Future<T>> threadResults)
+  public static void assertOnFutures(List<Future<?>> threadResults)
       throws InterruptedException, ExecutionException {
-    for (Future<T> threadResult : threadResults) {
+    for (Future<?> threadResult : threadResults) {
       try {
         threadResult.get();
       } catch (ExecutionException e) {
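
Note on the signature change: replacing the type parameter with a wildcard lets callers collect futures of mixed result types, which List<Future<T>> could not express. A minimal sketch (executor is assumed to be any ExecutorService):

    List<Future<?>> results = new ArrayList<>();
    results.add(executor.submit(() -> "done")); // Future<String>
    results.add(executor.submit(() -> 42));     // Future<Integer>
    MultithreadedTestUtil.assertOnFutures(results); // only compiles with the wildcard form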

@@ -122,7 +122,7 @@ public class TestMetaTableAccessorNoCluster {
   /**
    * Test that MetaTableAccessor will ride over server throwing
    * "Server not running" IOEs.
-   * @see @link {https://issues.apache.org/jira/browse/HBASE-3446}
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
    * @throws IOException
    * @throws InterruptedException
    */

@@ -603,12 +603,12 @@ public class TestPartialResultsFromClientSide {

   /**
    * Make puts to put the input value into each combination of row, family, and qualifier
-   * @param rows
-   * @param families
-   * @param qualifiers
-   * @param value
-   * @return
-   * @throws IOException
+   * @param rows the rows to use
+   * @param families the families to use
+   * @param qualifiers the qualifiers to use
+   * @param value the values to use
+   * @return the dot product of the given rows, families, qualifiers, and values
+   * @throws IOException if there is a problem creating one of the Put objects
    */
   static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
       byte[] value) throws IOException {

@@ -632,11 +632,11 @@ public class TestPartialResultsFromClientSide {
   /**
    * Make key values to represent each possible combination of family and qualifier in the specified
    * row.
-   * @param row
-   * @param families
-   * @param qualifiers
-   * @param value
-   * @return
+   * @param row the row to use
+   * @param families the families to use
+   * @param qualifiers the qualifiers to use
+   * @param value the values to use
+   * @return the dot product of the given families, qualifiers, and values for a given row
    */
   static ArrayList<Cell> createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers,
       byte[] value) {

@@ -772,9 +772,9 @@ public class TestPartialResultsFromClientSide {
   /**
    * Exhausts the scanner by calling next repetitively. Once completely exhausted, close scanner and
    * return total cell count
-   * @param scanner
-   * @return
-   * @throws Exception
+   * @param scanner the scanner to exhaust
+   * @return the number of cells counted
+   * @throws Exception if there is a problem retrieving cells from the scanner
    */
   private int countCellsFromScanner(ResultScanner scanner) throws Exception {
     Result result = null;

@@ -425,9 +425,9 @@ public class TestHFileArchiving {

   /**
    * Get the names of all the files below the given directory
-   * @param fs
-   * @param archiveDir
-   * @return
+   * @param fs the file system to inspect
+   * @param archiveDir the directory in which to look
+   * @return a list of all files in the directory and sub-directories
    * @throws IOException
    */
   private List<String> getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException {

@@ -50,7 +50,7 @@ public class HConnectionTestingUtility {
   /**
    * Get a Mocked {@link ClusterConnection} that goes with the passed <code>conf</code>
    * configuration instance. Minimally the mock will return
    * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked.
    * Be sure to shutdown the connection when done by calling
    * {@link Connection#close()} else it will stick around; this is probably not what you want.
    * @param conf configuration

@@ -148,7 +148,7 @@ public class HConnectionTestingUtility {
    * @param conf configuration
    * @return ClusterConnection object for <code>conf</code>
    * @throws ZooKeeperConnectionException
-   * @see @link
+   * [Dead link]: See also
    * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
    */
   public static ClusterConnection getSpiedConnection(final Configuration conf)

@@ -24,9 +24,15 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;

@@ -91,6 +97,24 @@ class StringRange {
     return hashCode;
   }

+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (!(obj instanceof StringRange)) {
+      return false;
+    }
+    StringRange oth = (StringRange) obj;
+    return this.startInclusive == oth.startInclusive &&
+        this.endInclusive == oth.endInclusive &&
+        Objects.equals(this.start, oth.start) &&
+        Objects.equals(this.end, oth.end);
+  }
+
   @Override
   public String toString() {
     String result = (this.startInclusive ? "[" : "(")
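
Note: the added equals() uses java.util.Objects.equals (hence the new import earlier in this file) so null start/end bounds compare safely. A standalone illustration:

    String start = null;
    System.out.println(Objects.equals(start, null)); // true, no NullPointerException
    System.out.println(Objects.equals(start, "x"));  // false
    // start.equals("x") would instead throw NullPointerException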

@@ -133,33 +157,21 @@ public class TestColumnRangeFilter {
   @Rule
   public TestName name = new TestName();

-  /**
-   * @throws java.lang.Exception
-   */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster();
   }

-  /**
-   * @throws java.lang.Exception
-   */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }

-  /**
-   * @throws java.lang.Exception
-   */
   @Before
   public void setUp() throws Exception {
     // Nothing to do.
   }

-  /**
-   * @throws java.lang.Exception
-   */
   @After
   public void tearDown() throws Exception {
     // Nothing to do.

@@ -22,7 +22,7 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.WritableComparator;

 /**
- * Generate random <key, value> pairs.
+ * Generate random &lt;key, value&gt; pairs.
  * <p>
  * Copied from
  * <a href="https://issues.apache.org/jira/browse/HADOOP-3315">hadoop-3315 tfile</a>.

@@ -401,7 +401,7 @@ public class TestRegionPlacement {
   /**
    * Verify the number of user regions is assigned to the primary
    * region server based on the plan is expected
-   * @param expectedNum.
+   * @param expectedNum the expected number of assigned regions
    * @throws IOException
    */
   private void verifyRegionOnPrimaryRS(int expectedNum)

@@ -536,9 +536,8 @@ public class TestRegionPlacement {

   /**
    * Create a table with specified table name and region number.
-   * @param tablename
-   * @param regionNum
-   * @return
+   * @param tableName the name of the table to be created
+   * @param regionNum number of regions to create
    * @throws IOException
    */
   private static void createTable(TableName tableName, int regionNum)
|
@ -124,7 +124,7 @@ public class TestCompoundBloomFilter {
|
||||||
private FileSystem fs;
|
private FileSystem fs;
|
||||||
private BlockCache blockCache;
|
private BlockCache blockCache;
|
||||||
|
|
||||||
/** A message of the form "in test#<number>:" to include in logging. */
|
/** A message of the form "in test#<number>:" to include in logging. */
|
||||||
private String testIdMsg;
|
private String testIdMsg;
|
||||||
|
|
||||||
private static final int GENERATION_SEED = 2319;
|
private static final int GENERATION_SEED = 2319;
|
||||||
|
|
|

@@ -5925,7 +5925,7 @@ public class TestHRegion {
   /**
    * Utility method to setup a WAL mock.
    * Needs to do the bit where we close latch on the WALKeyImpl on append else test hangs.
-   * @return
+   * @return a mock WAL
    * @throws IOException
    */
   private WAL mockWAL() throws IOException {

@@ -274,9 +274,9 @@ public final class SnapshotTestingUtils {
    * Helper method for testing async snapshot operations. Just waits for the
    * given snapshot to complete on the server by repeatedly checking the master.
    *
-   * @param master: the master running the snapshot
-   * @param snapshot: the snapshot to check
-   * @param sleep: amount to sleep between checks to see if the snapshot is done
+   * @param master the master running the snapshot
+   * @param snapshot the snapshot to check
+   * @param sleep amount to sleep between checks to see if the snapshot is done
    * @throws ServiceException if the snapshot fails
    * @throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
    */

@@ -357,7 +357,7 @@ public final class SnapshotTestingUtils {
   /**
    * List all the HFiles in the given table
    *
-   * @param fs: FileSystem where the table lives
+   * @param fs FileSystem where the table lives
    * @param tableDir directory of the table
    * @return array of the current HFiles in the table (could be a zero-length array)
    * @throws IOException on unexecpted error reading the FS

@@ -140,7 +140,7 @@ public class HFileArchiveTestingUtil {
   }

   /**
-   * @return <expected, gotten, backup>, where each is sorted
+   * @return &lt;expected, gotten, backup&gt;, where each is sorted
    */
   private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
     List<List<String>> files = new ArrayList<>(3);

@@ -423,7 +423,7 @@ public class TestRegionSplitter {
   }

   /**
-   * List.indexOf() doesn't really work for a List<byte[]>, because byte[]
+   * List.indexOf() doesn't really work for a List&lt;byte[]&gt;, because byte[]
    * doesn't override equals(). This method checks whether a list contains
    * a given element by checking each element using the byte array
    * comparator.
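
Note: the pitfall this javadoc describes is easy to demonstrate; arrays inherit identity-based equals from Object, so contents must be compared explicitly:

    byte[] a = { 1, 2 };
    byte[] b = { 1, 2 };
    System.out.println(a.equals(b));                   // false: identity comparison
    System.out.println(java.util.Arrays.equals(a, b)); // true: content comparison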

pom.xml (4 changes)

@@ -1500,7 +1500,7 @@
     <asciidoctorj.pdf.version>1.5.0-alpha.15</asciidoctorj.pdf.version>
     <build.helper.maven.version>3.0.0</build.helper.maven.version>
     <buildnumber.maven.version>1.4</buildnumber.maven.version>
-    <checkstyle.version>6.18</checkstyle.version>
+    <checkstyle.version>8.2</checkstyle.version>
     <exec.maven.version>1.6.0</exec.maven.version>
     <error-prone.version>2.2.0</error-prone.version>
     <findbugs-annotations>1.3.9-1</findbugs-annotations>

@@ -1509,7 +1509,7 @@
     <lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
     <maven.antrun.version>1.8</maven.antrun.version>
     <maven.bundle.version>3.3.0</maven.bundle.version>
-    <maven.checkstyle.version>2.17</maven.checkstyle.version>
+    <maven.checkstyle.version>3.0.0</maven.checkstyle.version>
     <maven.compiler.version>3.6.1</maven.compiler.version>
     <maven.dependency.version>3.0.1</maven.dependency.version>
     <maven.eclipse.version>2.10</maven.eclipse.version>