HBASE-19239 Fix findbugs and error-prone issues
Fixes for hbase-client
parent c179d5144f
commit f856cbf414
@@ -172,7 +172,7 @@ public class ClusterStatus extends VersionedWritable {
     int count = 0;
     if (liveServers != null && !liveServers.isEmpty()) {
       for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
-        count += e.getValue().getNumberOfRequests();
+        count = (int) (count + e.getValue().getNumberOfRequests());
       }
     }
     return count;
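
Note on the hunk above: error-prone flags compound assignments that hide a
narrowing cast (NarrowingCompoundAssignment); "count += someLong" silently
truncates to int. A minimal standalone sketch of the pitfall (class name is
illustrative, not part of the patch):

    public class CompoundAssignmentNarrowing {
      public static void main(String[] args) {
        int count = 0;
        long big = 5_000_000_000L;
        count += big;              // compiles: += inserts a hidden (int) cast
        // count = count + big;    // would not compile: lossy long -> int
        System.out.println(count); // prints 705032704, silently overflowed
      }
    }
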
@@ -188,6 +188,7 @@ public class ClusterStatus extends VersionedWritable {
   /**
    * @see java.lang.Object#equals(java.lang.Object)
    */
+  @Override
   public boolean equals(Object o) {
     if (this == o) {
       return true;
@@ -208,6 +209,7 @@ public class ClusterStatus extends VersionedWritable {
   /**
    * @see java.lang.Object#hashCode()
    */
+  @Override
   public int hashCode() {
     return VERSION + hbaseVersion.hashCode() + this.liveServers.hashCode() +
       this.deadServers.hashCode() + this.master.hashCode() +
@@ -215,6 +217,7 @@ public class ClusterStatus extends VersionedWritable {
   }
 
   /** @return the object version number */
+  @Override
   public byte getVersion() {
     return VERSION;
   }
@@ -322,6 +325,7 @@ public class ClusterStatus extends VersionedWritable {
     return balancerOn;
   }
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
     sb.append("Master: " + master);
@@ -440,7 +444,7 @@ public class ClusterStatus extends VersionedWritable {
   public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
 
     Map<ServerName, ServerLoad> servers = null;
-    if (proto.getLiveServersList() != null) {
+    if (!proto.getLiveServersList().isEmpty()) {
       servers = new HashMap<ServerName, ServerLoad>(proto.getLiveServersList().size());
       for (LiveServerInfo lsi : proto.getLiveServersList()) {
         servers.put(ProtobufUtil.toServerName(
@@ -449,7 +453,7 @@ public class ClusterStatus extends VersionedWritable {
     }
 
     Collection<ServerName> deadServers = null;
-    if (proto.getDeadServersList() != null) {
+    if (!proto.getDeadServersList().isEmpty()) {
       deadServers = new ArrayList<ServerName>(proto.getDeadServersList().size());
       for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
         deadServers.add(ProtobufUtil.toServerName(sn));
@@ -457,7 +461,7 @@ public class ClusterStatus extends VersionedWritable {
     }
 
     Collection<ServerName> backupMasters = null;
-    if (proto.getBackupMastersList() != null) {
+    if (!proto.getBackupMastersList().isEmpty()) {
       backupMasters = new ArrayList<ServerName>(proto.getBackupMastersList().size());
       for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
         backupMasters.add(ProtobufUtil.toServerName(sn));
@@ -465,7 +469,7 @@ public class ClusterStatus extends VersionedWritable {
     }
 
     Set<RegionState> rit = null;
-    if (proto.getRegionsInTransitionList() != null) {
+    if (!proto.getRegionsInTransitionList().isEmpty()) {
       rit = new HashSet<RegionState>(proto.getRegionsInTransitionList().size());
       for (RegionInTransition region : proto.getRegionsInTransitionList()) {
         RegionState value = RegionState.convert(region.getRegionState());
@@ -474,7 +478,7 @@ public class ClusterStatus extends VersionedWritable {
     }
 
     String[] masterCoprocessors = null;
-    if (proto.getMasterCoprocessorsList() != null) {
+    if (!proto.getMasterCoprocessorsList().isEmpty()) {
       final int numMasterCoprocessors = proto.getMasterCoprocessorsCount();
       masterCoprocessors = new String[numMasterCoprocessors];
       for (int i = 0; i < numMasterCoprocessors; i++) {
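
The five hunks above rely on the protobuf convention that repeated-field
accessors return an empty list and never null, which makes the old null
checks dead code. A small sketch of the resulting idiom (names are
illustrative):

    import java.util.Collections;
    import java.util.List;

    public class EmptyOverNull {
      // Stand-in for a protobuf getXxxList() accessor: empty, never null.
      static List<String> getLiveServersList() {
        return Collections.emptyList();
      }

      public static void main(String[] args) {
        List<String> servers = getLiveServersList();
        if (!servers.isEmpty()) { // replaces a redundant 'servers != null'
          System.out.println("process " + servers.size() + " servers");
        }
      }
    }
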
@@ -1284,6 +1284,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * @deprecated Writables are going away. Use pb {@link #parseFrom(byte[])} instead.
    */
   @Deprecated
+  @Override
   public void readFields(DataInput in) throws IOException {
     int version = in.readByte();
     if (version < 6) {
@@ -1367,6 +1368,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
    * @deprecated Writables are going away. Use {@link #toByteArray()} instead.
    */
   @Deprecated
+  @Override
   public void write(DataOutput out) throws IOException {
     out.writeByte(COLUMN_DESCRIPTOR_VERSION);
     Bytes.writeByteArray(out, this.name);
@@ -235,7 +235,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
 
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
-    result ^= this.regionId;
+    result = (int) (result ^ this.regionId);
     result ^= Arrays.hashCode(this.startKey);
     result ^= Arrays.hashCode(this.endKey);
     result ^= Boolean.valueOf(this.offLine).hashCode();
@@ -995,15 +995,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
       KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
   }
 
-  /**
-   * Convert a HRegionInfo to the protobuf RegionInfo
-   *
-   * @return the converted RegionInfo
-   */
-  RegionInfo convert() {
-    return convert(this);
-  }
-
   /**
    * Convert a HRegionInfo to a RegionInfo
    *
@@ -1070,7 +1061,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
    * @see #parseFrom(byte[])
    */
   public byte [] toByteArray() {
-    byte [] bytes = convert().toByteArray();
+    byte [] bytes = convert(this).toByteArray();
     return ProtobufUtil.prependPBMagic(bytes);
   }
 
@@ -1148,7 +1139,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
    * @see #toByteArray()
    */
   public byte [] toDelimitedByteArray() throws IOException {
-    return ProtobufUtil.toDelimitedByteArray(convert());
+    return ProtobufUtil.toDelimitedByteArray(convert(this));
   }
 
   /**
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class InvalidFamilyOperationException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 22 - 1L;
+  private static final long serialVersionUID = (1L << 22) - 1L;
   /** default constructor */
   public InvalidFamilyOperationException() {
     super();
@@ -29,7 +29,7 @@ import java.io.IOException;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MasterNotRunningException extends IOException {
-  private static final long serialVersionUID = 1L << 23 - 1L;
+  private static final long serialVersionUID = (1L << 23) - 1L;
   /** default constructor */
   public MasterNotRunningException() {
     super();
@@ -31,7 +31,7 @@ import java.io.IOException;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class NotServingRegionException extends IOException {
-  private static final long serialVersionUID = 1L << 17 - 1L;
+  private static final long serialVersionUID = (1L << 17) - 1L;
 
   /** default constructor */
   public NotServingRegionException() {
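
The serialVersionUID hunks above and below all fix the same precedence bug:
binary minus binds tighter than shift, so 1L << 22 - 1L parses as
1L << (22 - 1L). A standalone check of both readings:

    public class ShiftPrecedence {
      public static void main(String[] args) {
        System.out.println(1L << 22 - 1L);   // 2097152, i.e. 1L << 21
        System.out.println((1L << 22) - 1L); // 4194303, the intended constant
      }
    }
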
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase;
 
 import java.util.Collection;
+import java.util.Objects;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -214,7 +215,7 @@ public class RegionLocations {
       HRegionLocation selectedLoc = selectRegionLocation(thisLoc,
         otherLoc, true, false);
 
-      if (selectedLoc != thisLoc) {
+      if (!Objects.equals(selectedLoc, thisLoc)) {
         if (newLocations == null) {
           newLocations = new HRegionLocation[max];
           System.arraycopy(locations, 0, newLocations, 0, i);
@@ -277,7 +278,7 @@ public class RegionLocations {
     HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location,
       checkForEquals, force);
 
-    if (selectedLoc == oldLoc) {
+    if (Objects.equals(selectedLoc, oldLoc)) {
       return this;
     }
     HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)];
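
The two RegionLocations hunks replace reference comparison with
java.util.Objects.equals, which is null-safe value equality. A minimal
illustration:

    import java.util.Objects;

    public class NullSafeEquality {
      public static void main(String[] args) {
        String a = new String("loc");
        String b = new String("loc");
        System.out.println(a == b);                     // false: identity only
        System.out.println(Objects.equals(a, b));       // true: value equality
        System.out.println(Objects.equals(null, null)); // true, and no NPE
      }
    }
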
@@ -287,11 +287,27 @@ public class ServerName implements Comparable<ServerName>, Serializable {
 
   @Override
   public int compareTo(ServerName other) {
-    int compare = this.getHostname().compareToIgnoreCase(other.getHostname());
-    if (compare != 0) return compare;
+    int compare;
+    if (other == null) {
+      return -1;
+    }
+    if (this.getHostname() == null) {
+      if (other.getHostname() != null) {
+        return 1;
+      }
+    } else {
+      if (other.getHostname() == null) {
+        return -1;
+      }
+      compare = this.getHostname().compareToIgnoreCase(other.getHostname());
+      if (compare != 0) {
+        return compare;
+      }
+    }
     compare = this.getPort() - other.getPort();
-    if (compare != 0) return compare;
-
+    if (compare != 0) {
+      return compare;
+    }
     return Long.compare(this.getStartcode(), other.getStartcode());
   }
 
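
The rewritten compareTo above tolerates null arguments and null hostnames,
with nulls sorting after non-null values. The same ordering can be sketched
with Comparator combinators (names here are illustrative, not the patch's
code):

    import java.util.Comparator;

    public class NullSafeCompare {
      static final Comparator<String> HOST =
          Comparator.nullsLast(String.CASE_INSENSITIVE_ORDER);

      public static void main(String[] args) {
        System.out.println(HOST.compare("a.example", null) < 0); // true
        System.out.println(HOST.compare(null, null));            // 0
      }
    }
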
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.TableName;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class TableExistsException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 7 - 1L;
+  private static final long serialVersionUID = (1L << 7) - 1L;
   /** default constructor */
   public TableExistsException() {
     super();
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class TableNotDisabledException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 19 - 1L;
+  private static final long serialVersionUID = (1L << 19) - 1L;
   /** default constructor */
   public TableNotDisabledException() {
     super();
@@ -29,7 +29,7 @@ import java.io.IOException;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class ZooKeeperConnectionException extends IOException {
-  private static final long serialVersionUID = 1L << 23 - 1L;
+  private static final long serialVersionUID = (1L << 23) - 1L;
   /** default constructor */
   public ZooKeeperConnectionException() {
     super();
@@ -52,6 +52,7 @@ public class Append extends Mutation {
    * A client that is not interested in the result can save network
    * bandwidth setting this to false.
    */
+  @Override
   public Append setReturnResults(boolean returnResults) {
     super.setReturnResults(returnResults);
     return this;
@@ -61,6 +62,7 @@ public class Append extends Mutation {
    * @return current setting for returnResults
    */
   // This method makes public the superclasses's protected method.
+  @Override
   public boolean isReturnResults() {
     return super.isReturnResults();
   }
@@ -103,6 +103,8 @@ import org.apache.htrace.Trace;
  * </p>
  */
 @InterfaceAudience.Private
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
+  justification="Synchronization on tasks in progress counter is intended")
 class AsyncProcess {
   private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
   protected static final AtomicLong COUNTER = new AtomicLong();
@@ -380,7 +382,7 @@ class AsyncProcess {
     // we will do more retries in aggregate, but the user will be none the wiser.
     this.serverTrackerTimeout = 0;
     for (int i = 0; i < this.numTries; ++i) {
-      serverTrackerTimeout += ConnectionUtils.getPauseTime(this.pause, i);
+      serverTrackerTimeout = (int) (serverTrackerTimeout + ConnectionUtils.getPauseTime(this.pause, i));
     }
 
     this.rpcCallerFactory = rpcCaller;
@@ -108,6 +108,7 @@ class FastFailInterceptorContext extends
     this.tries = tries;
   }
 
+  @Override
   public void clear() {
     server = null;
     fInfo = null;
@@ -118,10 +119,12 @@ class FastFailInterceptorContext extends
     tries = 0;
   }
 
+  @Override
   public FastFailInterceptorContext prepare(RetryingCallable<?> callable) {
     return prepare(callable, 0);
   }
 
+  @Override
   public FastFailInterceptorContext prepare(RetryingCallable<?> callable,
       int tries) {
     if (callable instanceof RegionServerCallable) {
@@ -243,6 +243,7 @@ public class Get extends Query
     return this;
   }
 
+  @Override
   public Get setLoadColumnFamiliesOnDemand(boolean value) {
     return (Get) super.setLoadColumnFamiliesOnDemand(value);
   }
@@ -24,6 +24,7 @@ import com.google.protobuf.ServiceException;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -2460,6 +2461,7 @@ public class HBaseAdmin implements Admin {
    *
    * @return true if region normalizer is enabled, false otherwise.
    */
+  @Override
   public boolean isNormalizerEnabled() throws IOException {
     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
       @Override
@@ -2478,6 +2480,7 @@ public class HBaseAdmin implements Admin {
    *
    * @return Previous normalizer value
    */
+  @Override
   public boolean setNormalizerRunning(final boolean on) throws IOException {
     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
       @Override
@@ -2608,10 +2611,10 @@ public class HBaseAdmin implements Admin {
   public void mergeRegions(final byte[] nameOfRegionA,
       final byte[] nameOfRegionB, final boolean forcible)
       throws IOException {
-    final byte[] encodedNameOfRegionA = isEncodedRegionName(nameOfRegionA) ?
-      nameOfRegionA : HRegionInfo.encodeRegionName(nameOfRegionA).getBytes();
-    final byte[] encodedNameOfRegionB = isEncodedRegionName(nameOfRegionB) ?
-      nameOfRegionB : HRegionInfo.encodeRegionName(nameOfRegionB).getBytes();
+    final byte[] encodedNameOfRegionA = isEncodedRegionName(nameOfRegionA) ? nameOfRegionA :
+      HRegionInfo.encodeRegionName(nameOfRegionA).getBytes(StandardCharsets.UTF_8);
+    final byte[] encodedNameOfRegionB = isEncodedRegionName(nameOfRegionB) ? nameOfRegionB :
+      HRegionInfo.encodeRegionName(nameOfRegionB).getBytes(StandardCharsets.UTF_8);
 
     Pair<HRegionInfo, ServerName> pair = getRegion(nameOfRegionA);
     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
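
The mergeRegions hunk above also swaps the platform-default String.getBytes()
for an explicit charset, so the encoded region name no longer depends on the
JVM's file.encoding. A minimal illustration:

    import java.nio.charset.StandardCharsets;

    public class ExplicitCharset {
      public static void main(String[] args) {
        String name = "r\u00e9gion"; // non-ASCII, encodes differently per charset
        byte[] platform = name.getBytes(); // length depends on default charset
        byte[] utf8 = name.getBytes(StandardCharsets.UTF_8); // 7 bytes everywhere
        System.out.println(platform.length + " vs " + utf8.length);
      }
    }
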
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -124,8 +125,7 @@ class HConnectionKey {
     for (String property : CONNECTION_PROPERTIES) {
       String thisValue = this.properties.get(property);
       String thatValue = that.properties.get(property);
-      //noinspection StringEquality
-      if (thisValue == thatValue) {
+      if (Objects.equals(thisValue, thatValue)) {
         continue;
       }
       if (thisValue == null || !thisValue.equals(thatValue)) {
@@ -1808,6 +1808,7 @@ public class HTable implements HTableInterface, RegionLocator {
     return getKeysAndRegionsInRange(start, end, true).getFirst();
   }
 
+  @Override
   public void setOperationTimeout(int operationTimeout) {
     this.operationTimeout = operationTimeout;
     if (mutator != null) {
@@ -1816,6 +1817,7 @@ public class HTable implements HTableInterface, RegionLocator {
     multiAp.setOperationTimeout(operationTimeout);
   }
 
+  @Override
   public int getOperationTimeout() {
     return operationTimeout;
   }
@@ -468,7 +468,7 @@ public class HTableMultiplexer {
   }
 
   public long getTotalBufferedCount() {
-    return queue.size() + currentProcessingCount.get();
+    return (long) queue.size() + currentProcessingCount.get();
   }
 
   public AtomicAverageCounter getAverageLatencyCounter() {
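
getTotalBufferedCount above is a widening fix: both operands are 32-bit, so
the sum is computed in int and only then widened to long. Casting one operand
first forces 64-bit arithmetic. A standalone demonstration:

    public class IntSumToLong {
      public static void main(String[] args) {
        int a = 2_000_000_000;
        int b = 2_000_000_000;
        long bad = a + b;         // int overflow first, then widen: -294967296
        long good = (long) a + b; // widen first: 4000000000
        System.out.println(bad + " vs " + good);
      }
    }
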
@@ -308,6 +308,7 @@ public class HTablePool implements Closeable {
    * <p>
    * Note: this is a 'shutdown' of all the table pools.
    */
+  @Override
   public void close() throws IOException {
     for (String tableName : tables.keySet()) {
       closeTablePool(tableName);
@@ -524,6 +525,7 @@ public class HTablePool implements Closeable {
    *
    * @throws IOException
    */
+  @Override
   public void close() throws IOException {
     checkState();
     open = false;
@@ -635,7 +637,8 @@ public class HTablePool implements Closeable {
 
     private void checkState() {
       if (!isOpen()) {
-        throw new IllegalStateException("Table=" + new String(table.getTableName()) + " already closed");
+        throw new IllegalStateException("Table=" + table.getName().getNameAsString()
+            + " already closed");
       }
     }
 
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Increment extends Mutation implements Comparable<Row> {
-  private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE;
+  private static final long HEAP_OVERHEAD = (long) ClassSize.REFERENCE + ClassSize.TIMERANGE;
   private TimeRange tr = new TimeRange();
 
   /**
@@ -164,6 +164,7 @@ public class Increment extends Mutation implements Comparable<Row> {
    * client that is not interested in the result can save network bandwidth setting this
    * to false.
    */
+  @Override
   public Increment setReturnResults(boolean returnResults) {
     super.setReturnResults(returnResults);
     return this;
@@ -173,6 +174,7 @@ public class Increment extends Mutation implements Comparable<Row> {
    * @return current setting for returnResults
    */
   // This method makes public the superclasses's protected method.
+  @Override
   public boolean isReturnResults() {
     return super.isReturnResults();
   }
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class NoServerForRegionException extends DoNotRetryRegionException {
-  private static final long serialVersionUID = 1L << 11 - 1L;
+  private static final long serialVersionUID = (1L << 11) - 1L;
 
   /** default constructor */
   public NoServerForRegionException() {
@@ -38,10 +38,12 @@ public class PerClientRandomNonceGenerator implements NonceGenerator {
     this.clientId = (((long)Arrays.hashCode(clientIdBase)) << 32) + rdm.nextInt();
   }
 
+  @Override
   public long getNonceGroup() {
     return this.clientId;
   }
 
+  @Override
   public long newNonce() {
     long result = HConstants.NO_NONCE;
     do {
@@ -38,7 +38,7 @@ class RegistryFactory {
       ZooKeeperRegistry.class.getName());
     Registry registry = null;
     try {
-      registry = (Registry)Class.forName(registryClass).newInstance();
+      registry = (Registry)Class.forName(registryClass).getDeclaredConstructor().newInstance();
     } catch (Throwable t) {
       throw new IOException(t);
     }
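
RegistryFactory above moves off Class.newInstance(), which is deprecated
since Java 9 because it rethrows any checked exception from the constructor
without declaring it; getDeclaredConstructor().newInstance() wraps
constructor failures in InvocationTargetException instead. A minimal sketch:

    public class ReflectiveInstantiation {
      public static void main(String[] args) throws Exception {
        Object o = Class.forName("java.lang.StringBuilder")
            .getDeclaredConstructor()
            .newInstance();
        System.out.println(o.getClass().getName()); // java.lang.StringBuilder
      }
    }
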
@@ -66,6 +66,7 @@ public class RpcRetryingCaller<T> {
   private final long pauseForCQTBE;
   private final int retries;
   private final int rpcTimeout;// timeout for each rpc request
+  private final Object lock = new Object();
   private final AtomicBoolean cancelled = new AtomicBoolean(false);
   private final RetryingCallerInterceptor interceptor;
   private final RetryingCallerInterceptorContext context;
@@ -105,16 +106,16 @@ public class RpcRetryingCaller<T> {
 
   private int getTimeout(int callTimeout){
     int timeout = getRemainingTime(callTimeout);
-    if (timeout <= 0 || rpcTimeout > 0 && rpcTimeout < timeout){
+    if (timeout <= 0 || (rpcTimeout > 0 && rpcTimeout < timeout)){
       timeout = rpcTimeout;
     }
     return timeout;
   }
 
   public void cancel(){
-    synchronized (cancelled){
+    synchronized (lock){
       cancelled.set(true);
-      cancelled.notifyAll();
+      lock.notifyAll();
     }
   }
 
@@ -181,9 +182,9 @@ public class RpcRetryingCaller<T> {
       }
       try {
         if (expectedSleep > 0) {
-          synchronized (cancelled) {
+          synchronized (lock) {
            if (cancelled.get()) return null;
-            cancelled.wait(expectedSleep);
+            lock.wait(expectedSleep);
           }
         }
         if (cancelled.get()) return null;
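
The RpcRetryingCaller hunks stop synchronizing on the AtomicBoolean itself,
which findbugs flags as JLM_JSR166_UTILCONCURRENT_MONITORENTER, and introduce
a dedicated lock object for wait/notify. A condensed sketch of the pattern
(method names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class DedicatedLock {
      private final Object lock = new Object();
      private final AtomicBoolean cancelled = new AtomicBoolean(false);

      void cancel() {
        synchronized (lock) {      // never use the atomic as a monitor
          cancelled.set(true);
          lock.notifyAll();
        }
      }

      void awaitCancel(long millis) throws InterruptedException {
        synchronized (lock) {
          if (!cancelled.get()) {
            lock.wait(millis);
          }
        }
      }
    }
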
@@ -871,6 +871,7 @@ public class Scan extends Query {
     return allowPartialResults;
   }
 
+  @Override
   public Scan setLoadColumnFamiliesOnDemand(boolean value) {
     return (Scan) super.setLoadColumnFamiliesOnDemand(value);
   }
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 public class LongColumnInterpreter extends ColumnInterpreter<Long, Long,
     EmptyMsg, LongMsg, LongMsg> {
 
+  @Override
   public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv)
       throws IOException {
     if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG)
@@ -49,7 +50,7 @@ public class LongColumnInterpreter extends ColumnInterpreter<Long, Long,
     return Bytes.toLong(kv.getValueArray(), kv.getValueOffset());
   }
 
-   @Override
+  @Override
   public Long add(Long l1, Long l2) {
     if (l1 == null ^ l2 == null) {
       return (l1 == null) ? l2 : l1; // either of one is null.
@@ -656,8 +656,8 @@ public class ReplicationAdmin implements Closeable {
       admin = this.connection.getAdmin();
       HTableDescriptor htd = admin.getTableDescriptor(tableName);
       ReplicationState currentReplicationState = getTableReplicationState(htd);
-      if (enableRep && currentReplicationState != ReplicationState.ENABLED
-          || !enableRep && currentReplicationState != ReplicationState.DISABLED) {
+      if ((enableRep && currentReplicationState != ReplicationState.ENABLED)
+          || (!enableRep && currentReplicationState != ReplicationState.DISABLED)) {
         boolean isOnlineSchemaUpdateEnabled =
             this.connection.getConfiguration()
                 .getBoolean("hbase.online.schema.update.enable", true);
@@ -716,6 +716,7 @@ public class ReplicationAdmin implements Closeable {
     return ReplicationState.DISABLED;
   }
 
+  @SuppressWarnings("unchecked")
   private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig)
       throws ReplicationException {
     String filterCSV = peerConfig.getConfiguration().
@@ -724,8 +725,8 @@ public class ReplicationAdmin implements Closeable {
       String[] filters = filterCSV.split(",");
       for (String filter : filters) {
         try {
-          Class clazz = Class.forName(filter);
-          Object o = clazz.newInstance();
+          Class<?> clazz = Class.forName(filter);
+          Object o = clazz.getDeclaredConstructor().newInstance();
         } catch (Exception e) {
           throw new ReplicationException("Configured WALEntryFilter " + filter +
               " could not be created. Failing add/update " + "peer operation.", e);
@@ -783,12 +784,12 @@ public class ReplicationAdmin implements Closeable {
    * @see java.lang.Object#equals(java.lang.Object)
    */
   private boolean compareForReplication(HTableDescriptor peerHtd, HTableDescriptor localHtd) {
     if (peerHtd == localHtd) {
       return true;
     }
     if (peerHtd == null) {
       return false;
     }
     if (peerHtd.equals(localHtd)) {
       return true;
     }
     boolean result = false;
 
     // Create a copy of peer HTD as we need to change its replication
@@ -34,7 +34,7 @@ public enum SecurityCapability {
   CELL_AUTHORIZATION(3),
   CELL_VISIBILITY(4);
 
-  private int value;
+  private final int value;
 
   public int getValue() {
     return value;
@@ -50,6 +50,7 @@ public class BinaryComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.BinaryComparator.Builder builder =
       ComparatorProtos.BinaryComparator.newBuilder();
@@ -79,6 +80,7 @@ public class BinaryComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof BinaryComparator)) return false;
@@ -52,6 +52,7 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.BinaryPrefixComparator.Builder builder =
       ComparatorProtos.BinaryPrefixComparator.newBuilder();
@@ -81,6 +82,7 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof BinaryPrefixComparator)) return false;
@@ -66,6 +66,7 @@ public class BitComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.BitComparator.Builder builder =
       ComparatorProtos.BitComparator.newBuilder();
@@ -99,6 +100,7 @@ public class BitComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof BitComparator)) return false;
@@ -84,6 +84,7 @@ public class ColumnCountGetFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.ColumnCountGetFilter.Builder builder =
       FilterProtos.ColumnCountGetFilter.newBuilder();
@@ -113,6 +114,7 @@ public class ColumnCountGetFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof ColumnCountGetFilter)) return false;
@@ -174,6 +174,7 @@ public class ColumnPaginationFilter extends FilterBase
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.ColumnPaginationFilter.Builder builder =
       FilterProtos.ColumnPaginationFilter.newBuilder();
@@ -213,6 +214,7 @@ public class ColumnPaginationFilter extends FilterBase
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof ColumnPaginationFilter)) return false;
@@ -99,6 +99,7 @@ public class ColumnPrefixFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.ColumnPrefixFilter.Builder builder =
       FilterProtos.ColumnPrefixFilter.newBuilder();
@@ -128,6 +129,7 @@ public class ColumnPrefixFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof ColumnPrefixFilter)) return false;
@@ -143,8 +143,8 @@ public class ColumnRangeFilter extends FilterBase {
     int cmpMax = Bytes.compareTo(buffer, qualifierOffset, qualifierLength,
       this.maxColumn, 0, this.maxColumn.length);
 
-    if (this.maxColumnInclusive && cmpMax <= 0 ||
-        !this.maxColumnInclusive && cmpMax < 0) {
+    if ((this.maxColumnInclusive && cmpMax <= 0) ||
+        (!this.maxColumnInclusive && cmpMax < 0)) {
       return ReturnCode.INCLUDE;
     }
 
@@ -177,6 +177,7 @@ public class ColumnRangeFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.ColumnRangeFilter.Builder builder =
       FilterProtos.ColumnRangeFilter.newBuilder();
@@ -211,6 +212,7 @@ public class ColumnRangeFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof ColumnRangeFilter)) return false;
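
The ColumnRangeFilter condition fix is readability-only: && already binds
tighter than ||, so both forms evaluate identically. A quick check:

    public class AndOrPrecedence {
      public static void main(String[] args) {
        boolean inclusive = true;
        int cmp = 0;
        boolean implicit = inclusive && cmp <= 0 || !inclusive && cmp < 0;
        boolean explicit = (inclusive && cmp <= 0) || (!inclusive && cmp < 0);
        System.out.println(implicit == explicit); // true for any inputs
      }
    }
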
@@ -170,6 +170,7 @@ public abstract class CompareFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof CompareFilter)) return false;
@@ -217,6 +217,7 @@ public class DependentColumnFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.DependentColumnFilter.Builder builder =
       FilterProtos.DependentColumnFilter.newBuilder();
@@ -268,6 +269,7 @@ public class DependentColumnFilter extends CompareFilter {
   */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
     value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof DependentColumnFilter)) return false;
@@ -82,6 +82,7 @@ public class FamilyFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.FamilyFilter.Builder builder =
       FilterProtos.FamilyFilter.newBuilder();
@@ -121,6 +122,7 @@ public class FamilyFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FamilyFilter)) return false;
@@ -142,6 +142,7 @@ public abstract class FilterBase extends Filter {
    *
    * {@inheritDoc}
   */
+  @Override
   public Cell getNextCellHint(Cell currentKV) throws IOException {
     // Old filters based off of this class will override KeyValue getNextKeyHint(KeyValue).
     // Thus to maintain compatibility we need to call the old version.
@@ -154,6 +155,7 @@ public abstract class FilterBase extends Filter {
    *
    * {@inheritDoc}
   */
+  @Override
   public boolean isFamilyEssential(byte[] name) throws IOException {
     return true;
   }
@@ -171,6 +173,7 @@ public abstract class FilterBase extends Filter {
   /**
    * Return filter's info for debugging and logging purpose.
   */
+  @Override
   public String toString() {
     return this.getClass().getSimpleName();
   }
@@ -178,6 +181,7 @@ public abstract class FilterBase extends Filter {
   /**
    * Return length 0 byte array for Filters that don't require special serialization
   */
+  @Override
   public byte[] toByteArray() throws IOException {
     return new byte[0];
   }
@@ -189,6 +193,7 @@ public abstract class FilterBase extends Filter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter other) {
     return true;
   }
@@ -221,6 +221,7 @@ final public class FilterList extends Filter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() throws IOException {
     FilterProtos.FilterList.Builder builder = FilterProtos.FilterList.newBuilder();
     builder.setOperator(FilterProtos.FilterList.Operator.valueOf(operator.name()));
@@ -262,6 +263,7 @@ final public class FilterList extends Filter {
    * @return true if and only if the fields of the filter that are serialized are equal to the
    * corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter other) {
     if (other == this) return true;
     if (!(other instanceof FilterList)) return false;
@@ -54,6 +54,7 @@ final public class FilterWrapper extends Filter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() throws IOException {
     FilterProtos.FilterWrapper.Builder builder =
       FilterProtos.FilterWrapper.newBuilder();
@@ -181,6 +182,7 @@ final public class FilterWrapper extends Filter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FilterWrapper)) return false;
@@ -42,6 +42,7 @@ public class FirstKeyOnlyFilter extends FilterBase {
   public FirstKeyOnlyFilter() {
   }
 
+  @Override
   public void reset() {
     foundKV = false;
   }
@@ -84,6 +85,7 @@ public class FirstKeyOnlyFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.FirstKeyOnlyFilter.Builder builder =
       FilterProtos.FirstKeyOnlyFilter.newBuilder();
@@ -113,6 +115,7 @@ public class FirstKeyOnlyFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FirstKeyOnlyFilter)) return false;
@@ -82,6 +82,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.FirstKeyValueMatchingQualifiersFilter.Builder builder =
       FilterProtos.FirstKeyValueMatchingQualifiersFilter.newBuilder();
@@ -118,6 +119,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FirstKeyValueMatchingQualifiersFilter)) return false;
@@ -258,6 +258,7 @@ public class FuzzyRowFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() {
     FilterProtos.FuzzyRowFilter.Builder builder = FilterProtos.FuzzyRowFilter.newBuilder();
     for (Pair<byte[], byte[]> fuzzyData : fuzzyKeysData) {
@@ -466,45 +467,55 @@ public class FuzzyRowFilter extends FilterBase {
   /** Abstracts directional comparisons based on scan direction. */
   private enum Order {
     ASC {
+      @Override
       public boolean lt(int lhs, int rhs) {
         return lhs < rhs;
       }
 
+      @Override
       public boolean gt(int lhs, int rhs) {
         return lhs > rhs;
       }
 
+      @Override
       public byte inc(byte val) {
         // TODO: what about over/underflow?
         return (byte) (val + 1);
       }
 
+      @Override
       public boolean isMax(byte val) {
         return val == (byte) 0xff;
       }
 
+      @Override
       public byte min() {
         return 0;
       }
     },
     DESC {
+      @Override
       public boolean lt(int lhs, int rhs) {
         return lhs > rhs;
       }
 
+      @Override
       public boolean gt(int lhs, int rhs) {
         return lhs < rhs;
       }
 
+      @Override
       public byte inc(byte val) {
         // TODO: what about over/underflow?
         return (byte) (val - 1);
       }
 
+      @Override
      public boolean isMax(byte val) {
        return val == 0;
      }
 
+      @Override
       public byte min() {
         return (byte) 0xFF;
       }
@@ -627,6 +638,7 @@ public class FuzzyRowFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized are equal to the
    * corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FuzzyRowFilter)) return false;
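
The Order enum above is the constant-specific-method pattern: each constant
overrides abstract members, and the new @Override annotations let the
compiler verify the overrides. A stripped-down sketch of the same pattern
(names are illustrative):

    enum Direction {
      ASC  { @Override boolean lt(int l, int r) { return l < r; } },
      DESC { @Override boolean lt(int l, int r) { return l > r; } };

      abstract boolean lt(int lhs, int rhs);

      public static void main(String[] args) {
        System.out.println(Direction.ASC.lt(1, 2));  // true
        System.out.println(Direction.DESC.lt(1, 2)); // false
      }
    }
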
@@ -65,6 +65,7 @@ public class InclusiveStopFilter extends FilterBase {
     return v;
   }
 
+  @Override
   public boolean filterRowKey(byte[] buffer, int offset, int length) {
     if (buffer == null) {
       //noinspection RedundantIfStatement
@@ -81,6 +82,7 @@ public class InclusiveStopFilter extends FilterBase {
     return done;
   }
 
+  @Override
   public boolean filterAllRemaining() {
     return done;
   }
@@ -95,6 +97,7 @@ public class InclusiveStopFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.InclusiveStopFilter.Builder builder =
       FilterProtos.InclusiveStopFilter.newBuilder();
@@ -124,6 +127,7 @@ public class InclusiveStopFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof InclusiveStopFilter)) return false;
@@ -88,6 +88,7 @@ public class KeyOnlyFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.KeyOnlyFilter.Builder builder =
       FilterProtos.KeyOnlyFilter.newBuilder();
@@ -117,6 +118,7 @@ public class KeyOnlyFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof KeyOnlyFilter)) return false;
@@ -141,6 +141,7 @@ public class MultiRowRangeFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() {
     FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter
         .newBuilder();
@@ -193,6 +194,7 @@ public class MultiRowRangeFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized are equal to the
    * corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this)
       return true;
@@ -115,6 +115,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.MultipleColumnPrefixFilter.Builder builder =
       FilterProtos.MultipleColumnPrefixFilter.newBuilder();
@@ -152,6 +153,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof MultipleColumnPrefixFilter)) return false;
@@ -62,6 +62,7 @@ public class NullComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.NullComparator.Builder builder =
       ComparatorProtos.NullComparator.newBuilder();
@@ -90,6 +91,7 @@ public class NullComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof NullComparator)) return false;
@@ -24,11 +24,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
 
 import java.io.IOException;
 import java.util.ArrayList;
 
 /**
  * Implementation of Filter interface that limits results to a specific page
  * size. It terminates scanning once the number of filter-passed rows is >
@@ -72,15 +72,18 @@ public class PageFilter extends FilterBase {
     return v;
   }
 
+  @Override
   public boolean filterAllRemaining() {
     return this.rowsAccepted >= this.pageSize;
   }
 
+  @Override
   public boolean filterRow() {
     this.rowsAccepted++;
     return this.rowsAccepted > this.pageSize;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -95,6 +98,7 @@ public class PageFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.PageFilter.Builder builder =
       FilterProtos.PageFilter.newBuilder();
@@ -124,6 +128,7 @@ public class PageFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof PageFilter)) return false;
@@ -30,6 +30,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EmptyStackException;
@@ -263,7 +264,7 @@ public class ParseFilter {
       e.printStackTrace();
     }
     throw new IllegalArgumentException("Incorrect filter string " +
-        new String(filterStringAsByteArray));
+        new String(filterStringAsByteArray, StandardCharsets.UTF_8));
   }
 
   /**
@@ -811,9 +812,9 @@ public class ParseFilter {
     else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType))
       return new BinaryPrefixComparator(comparatorValue);
     else if (Bytes.equals(comparatorType, ParseConstants.regexStringType))
-      return new RegexStringComparator(new String(comparatorValue));
+      return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
     else if (Bytes.equals(comparatorType, ParseConstants.substringType))
-      return new SubstringComparator(new String(comparatorValue));
+      return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
     else
       throw new IllegalArgumentException("Incorrect comparatorType");
   }
@@ -50,6 +50,7 @@ public class PrefixFilter extends FilterBase {
     return prefix;
   }
 
+  @Override
   public boolean filterRowKey(byte[] buffer, int offset, int length) {
     if (buffer == null || this.prefix == null)
       return true;
@@ -80,14 +81,17 @@ public class PrefixFilter extends FilterBase {
     return v;
   }
 
+  @Override
   public boolean filterRow() {
     return filterRow;
   }
 
+  @Override
   public void reset() {
     filterRow = true;
   }
 
+  @Override
   public boolean filterAllRemaining() {
     return passedPrefix;
   }
@@ -102,6 +106,7 @@ public class PrefixFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.PrefixFilter.Builder builder =
       FilterProtos.PrefixFilter.newBuilder();
@@ -131,6 +136,7 @@ public class PrefixFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof PrefixFilter)) return false;
@@ -81,6 +81,7 @@ public class QualifierFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.QualifierFilter.Builder builder =
       FilterProtos.QualifierFilter.newBuilder();
@@ -120,6 +121,7 @@ public class QualifierFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof QualifierFilter)) return false;
@@ -90,7 +90,8 @@ public class RandomRowFilter extends FilterBase {
   public boolean filterRow() {
     return filterOutRow;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -118,6 +119,7 @@ public class RandomRowFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.RandomRowFilter.Builder builder =
       FilterProtos.RandomRowFilter.newBuilder();
@@ -147,6 +149,7 @@ public class RandomRowFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof RandomRowFilter)) return false;
@@ -97,6 +97,7 @@ public class RowFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.RowFilter.Builder builder =
       FilterProtos.RowFilter.newBuilder();
@@ -136,6 +137,7 @@ public class RowFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof RowFilter)) return false;
@@ -97,6 +97,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   // We cleaned result row in FilterRow to be consistent with scanning process.
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -132,6 +133,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.SingleColumnValueExcludeFilter.Builder builder =
       FilterProtos.SingleColumnValueExcludeFilter.newBuilder();
@@ -175,6 +177,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof SingleColumnValueExcludeFilter)) return false;
@@ -216,16 +216,19 @@ public class SingleColumnValueFilter extends FilterBase {
     }
   }
 
+  @Override
   public boolean filterRow() {
     // If column was found, return false if it was matched, true if it was not
     // If column not found, return true if we filter if missing, false if not
     return this.foundColumn? !this.matchedColumn: this.filterIfMissing;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
 
+  @Override
   public void reset() {
     foundColumn = false;
     matchedColumn = false;
@@ -325,6 +328,7 @@ public class SingleColumnValueFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     return convert().toByteArray();
   }
@@ -364,6 +368,7 @@ public class SingleColumnValueFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof SingleColumnValueFilter)) return false;
@@ -382,6 +387,7 @@ public class SingleColumnValueFilter extends FilterBase {
    * column in whole scan. If filterIfMissing == false, all families are essential,
    * because of possibility of skipping the rows without any data in filtered CF.
   */
+  @Override
   public boolean isFamilyEssential(byte[] name) {
     return !this.filterIfMissing || Bytes.equals(name, this.columnFamily);
   }
@@ -87,10 +87,12 @@ public class SkipFilter extends FilterBase {
     return filter.transformCell(v);
   }
 
+  @Override
   public boolean filterRow() {
     return filterRow;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -98,6 +100,7 @@ public class SkipFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() throws IOException {
     FilterProtos.SkipFilter.Builder builder =
       FilterProtos.SkipFilter.newBuilder();
@@ -131,6 +134,7 @@ public class SkipFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof SkipFilter)) return false;
@@ -139,6 +143,7 @@ public class SkipFilter extends FilterBase {
     return getFilter().areSerializedFieldsEqual(other.getFilter());
   }
 
+  @Override
   public boolean isFamilyEssential(byte[] name) throws IOException {
     return filter.isFamilyEssential(name);
   }
@@ -71,6 +71,7 @@ public class SubstringComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.SubstringComparator.Builder builder =
       ComparatorProtos.SubstringComparator.newBuilder();
@@ -100,6 +101,7 @@ public class SubstringComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof SubstringComparator)) return false;
@@ -124,6 +124,7 @@ public class TimestampsFilter extends FilterBase {
    *
    * @throws IOException This will never happen.
   */
+  @Override
   public Cell getNextCellHint(Cell currentCell) throws IOException {
     if (!canHint) {
       return null;
@@ -168,6 +169,7 @@ public class TimestampsFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() {
     FilterProtos.TimestampsFilter.Builder builder =
       FilterProtos.TimestampsFilter.newBuilder();
@@ -199,6 +201,7 @@ public class TimestampsFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof TimestampsFilter)) return false;
@@ -77,6 +77,7 @@ public class ValueFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.ValueFilter.Builder builder =
       FilterProtos.ValueFilter.newBuilder();
@@ -116,6 +117,7 @@ public class ValueFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof ValueFilter)) return false;
@@ -52,6 +52,7 @@ public class WhileMatchFilter extends FilterBase {
     return filter;
   }
 
+  @Override
   public void reset() throws IOException {
     this.filter.reset();
   }
@@ -99,6 +100,7 @@ public class WhileMatchFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
   */
+  @Override
   public byte[] toByteArray() throws IOException {
     FilterProtos.WhileMatchFilter.Builder builder =
       FilterProtos.WhileMatchFilter.newBuilder();
@@ -132,6 +134,7 @@ public class WhileMatchFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
   */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof WhileMatchFilter)) return false;
@@ -140,6 +143,7 @@ public class WhileMatchFilter extends FilterBase {
     return getFilter().areSerializedFieldsEqual(other.getFilter());
   }
 
+  @Override
   public boolean isFamilyEssential(byte[] name) throws IOException {
     return filter.isFamilyEssential(name);
   }
@ -242,7 +242,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
return null;
}
try {
return (Codec) Class.forName(className).newInstance();
return (Codec) Class.forName(className).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException("Failed getting codec " + className, e);
}
@ -270,7 +270,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
return null;
}
try {
return (CompressionCodec) Class.forName(className).newInstance();
return (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new RuntimeException("Failed getting compressor " + className, e);
}
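Note: Class.newInstance() is flagged by error-prone (and deprecated since Java 9) because it rethrows any checked exception from the no-arg constructor without declaring it. Class.getDeclaredConstructor().newInstance() keeps the checking: constructor failures arrive wrapped in InvocationTargetException. A sketch of the pattern, using a hypothetical helper:

    static Object load(String className) throws Exception {
      // Hypothetical helper; the patch applies the same change for Codec and CompressionCodec.
      return Class.forName(className)
          .getDeclaredConstructor()  // NoSuchMethodException if there is no no-arg constructor
          .newInstance();            // constructor exceptions come wrapped in InvocationTargetException
    }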

@ -67,6 +67,7 @@ public class BlockingRpcClient extends AbstractRpcClient<BlockingRpcConnection>
* Creates a connection. Can be overridden by a subclass for testing.
* @param remoteId - the ConnectionId to use for the connection creation.
*/
@Override
protected BlockingRpcConnection createConnection(ConnectionId remoteId) throws IOException {
return new BlockingRpcConnection(this, remoteId);
}
@ -57,20 +57,49 @@ public class ConnectionId {
}

@Override
public boolean equals(Object obj) {
if (obj instanceof ConnectionId) {
ConnectionId id = (ConnectionId) obj;
return address.equals(id.address) &&
((ticket != null && ticket.equals(id.ticket)) ||
(ticket == id.ticket)) &&
this.serviceName == id.serviceName;
}
return false;
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((address == null) ? 0 : address.hashCode());
result = prime * result + ((serviceName == null) ? 0 : serviceName.hashCode());
result = prime * result + ((ticket == null) ? 0 : ticket.hashCode());
return result;
}

@Override // simply use the default Object#hashcode() ?
public int hashCode() {
return hashCode(ticket,serviceName,address);
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ConnectionId other = (ConnectionId) obj;
if (address == null) {
if (other.address != null) {
return false;
}
} else if (!address.equals(other.address)) {
return false;
}
if (serviceName == null) {
if (other.serviceName != null) {
return false;
}
} else if (!serviceName.equals(other.serviceName)) {
return false;
}
if (ticket == null) {
if (other.ticket != null) {
return false;
}
} else if (!ticket.equals(other.ticket)) {
return false;
}
return true;
}

public static int hashCode(User ticket, String serviceName, InetSocketAddress address){
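Note: the old ConnectionId.equals() compared serviceName with ==, i.e. reference identity on a String, which can report two logically equal ids as different; the replacement is the standard null-safe equals/hashCode pair, so equal objects now also get equal hash codes. On Java 7+ the same contract can be written more compactly with java.util.Objects (a sketch, not what the patch uses):

    @Override
    public boolean equals(Object obj) {
      if (this == obj) return true;
      if (obj == null || getClass() != obj.getClass()) return false;
      ConnectionId other = (ConnectionId) obj;
      return Objects.equals(address, other.address)
          && Objects.equals(serviceName, other.serviceName)
          && Objects.equals(ticket, other.ticket);
    }

    @Override
    public int hashCode() {
      return Objects.hash(address, serviceName, ticket);
    }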
@ -446,10 +446,9 @@ public final class ResponseConverter {

public static Map<String, Long> getScanMetrics(ScanResponse response) {
Map<String, Long> metricMap = new HashMap<String, Long>();
if (response == null || !response.hasScanMetrics() || response.getScanMetrics() == null) {
if (response == null || !response.hasScanMetrics()) {
return metricMap;
}

ScanMetrics metrics = response.getScanMetrics();
int numberOfMetrics = metrics.getMetricsCount();
for (int i = 0; i < numberOfMetrics; i++) {
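Note: generated protobuf getters for singular message fields never return null; they return the field's default instance, which is why the getScanMetrics() == null branch was dead code and hasScanMetrics() is the only presence check needed. The idiom:

    if (response.hasScanMetrics()) {                   // presence check
      ScanMetrics metrics = response.getScanMetrics(); // never null for a message field
      // read metrics here
    }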
@ -67,6 +67,7 @@ public final class QuotaRetriever implements Closeable, Iterable<QuotaSettings>
}
}

@Override
public void close() throws IOException {
if (this.table != null) {
this.table.close();
@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RegionServerRunningException extends IOException {
private static final long serialVersionUID = 1L << 31 - 1L;
private static final long serialVersionUID = (1L << 31) - 1L;

/** Default Constructor */
public RegionServerRunningException() {
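Note: in Java the additive operators bind tighter than shifts, so 1L << 31 - 1L parses as 1L << 30 (1073741824) rather than the intended Integer.MAX_VALUE; with the parentheses, (1L << 31) - 1L is 2147483647. A quick check:

    System.out.println(1L << 31 - 1L);   // 1073741824, i.e. 1L << 30
    System.out.println((1L << 31) - 1L); // 2147483647, i.e. Integer.MAX_VALUE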
@ -39,14 +39,12 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;

/**
* This class provides an implementation of the ReplicationPeers interface using Zookeeper. The
@ -80,14 +78,12 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
// Map of peer clusters keyed by their id
private Map<String, ReplicationPeerZKImpl> peerClusters;
private final ReplicationQueuesClient queuesClient;
private Abortable abortable;

private static final Log LOG = LogFactory.getLog(ReplicationPeersZKImpl.class);

public ReplicationPeersZKImpl(final ZooKeeperWatcher zk, final Configuration conf,
final ReplicationQueuesClient queuesClient, Abortable abortable) {
super(zk, conf, abortable);
this.abortable = abortable;
this.peerClusters = new ConcurrentHashMap<String, ReplicationPeerZKImpl>();
this.queuesClient = queuesClient;
}
@ -102,6 +102,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
* Called when a new node has been created.
* @param path full path of the new node
*/
@Override
public void nodeCreated(String path) {
refreshListIfRightPath(path);
}
@ -110,6 +111,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
* Called when a node has been deleted
* @param path full path of the deleted node
*/
@Override
public void nodeDeleted(String path) {
if (stopper.isStopped()) {
return;
@ -128,6 +130,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
* Called when an existing node has a child node added or removed.
* @param path full path of the node whose children have changed
*/
@Override
public void nodeChildrenChanged(String path) {
if (stopper.isStopped()) {
return;
@ -159,6 +162,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
* Called when a node has been deleted
* @param path full path of the deleted node
*/
@Override
public void nodeDeleted(String path) {
List<String> peers = refreshPeersList(path);
if (peers == null) {
@ -177,6 +181,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
* Called when an existing node has a child node added or removed.
* @param path full path of the node whose children have changed
*/
@Override
public void nodeChildrenChanged(String path) {
List<String> peers = refreshPeersList(path);
if (peers == null) {
@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.security;

import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.TreeMap;

@ -67,15 +68,15 @@ public class SaslUtil {
}

static String encodeIdentifier(byte[] identifier) {
return new String(Base64.encodeBase64(identifier));
return new String(Base64.encodeBase64(identifier), StandardCharsets.UTF_8);
}

static byte[] decodeIdentifier(String identifier) {
return Base64.decodeBase64(identifier.getBytes());
return Base64.decodeBase64(identifier.getBytes(StandardCharsets.UTF_8));
}

static char[] encodePassword(byte[] password) {
return new String(Base64.encodeBase64(password)).toCharArray();
return new String(Base64.encodeBase64(password), StandardCharsets.UTF_8).toCharArray();
}

/**
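Note: new String(byte[]) and String.getBytes() use the JVM's platform default charset, so the same bytes can decode differently on differently configured hosts; error-prone's DefaultCharset check flags every such call, which is the reason for the many StandardCharsets.UTF_8 additions throughout this commit. Unlike Charset.forName("UTF-8"), the constant also cannot throw. The pattern:

    import java.nio.charset.StandardCharsets;

    byte[] bytes = "some-token".getBytes(StandardCharsets.UTF_8); // explicit, portable encoding
    String back = new String(bytes, StandardCharsets.UTF_8);      // decodes identically everywhere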
@ -48,7 +48,7 @@ public class Permission extends VersionedWritable {
public enum Action {
READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');

private byte code;
private final byte code;
Action(char code) {
this.code = (byte)code;
}
@ -132,6 +132,7 @@ public class VisibilityClient {
BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback =
new BlockingRpcCallback<VisibilityLabelsResponse>();

@Override
public VisibilityLabelsResponse call(VisibilityLabelsService service)
throws IOException {
VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
@ -217,6 +218,7 @@ public class VisibilityClient {
BlockingRpcCallback<GetAuthsResponse> rpcCallback =
new BlockingRpcCallback<GetAuthsResponse>();

@Override
public GetAuthsResponse call(VisibilityLabelsService service) throws IOException {
GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder();
getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
@ -268,6 +270,7 @@ public class VisibilityClient {
BlockingRpcCallback<ListLabelsResponse> rpcCallback =
new BlockingRpcCallback<ListLabelsResponse>();

@Override
public ListLabelsResponse call(VisibilityLabelsService service) throws IOException {
ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder();
if (regex != null) {
@ -332,6 +335,7 @@ public class VisibilityClient {
BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback =
new BlockingRpcCallback<VisibilityLabelsResponse>();

@Override
public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder();
setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
@ -298,7 +298,7 @@ public class PoolMap<K, V> implements Map<K, V> {
* the type of the resource
*/
@SuppressWarnings("serial")
public class ReusablePool<R> extends ConcurrentLinkedQueue<R> implements Pool<R> {
public static class ReusablePool<R> extends ConcurrentLinkedQueue<R> implements Pool<R> {
private int maxSize;

public ReusablePool(int maxSize) {
@ -342,7 +342,7 @@ public class PoolMap<K, V> implements Map<K, V> {
*
*/
@SuppressWarnings("serial")
class RoundRobinPool<R> extends CopyOnWriteArrayList<R> implements Pool<R> {
static class RoundRobinPool<R> extends CopyOnWriteArrayList<R> implements Pool<R> {
private int maxSize;
private int nextResource = 0;
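Note: neither pool touches PoolMap's instance state, so findbugs (SIC_INNER_SHOULD_BE_STATIC) wants them static: a non-static inner class carries a hidden reference to its enclosing instance, which wastes memory and can keep the outer object reachable. The difference, with a hypothetical Outer:

    public class Outer {
      class Inner {}          // every Inner pins an Outer instance via Outer.this
      static class Nested {}  // no implicit reference; new Outer.Nested() works standalone
    }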
@ -30,5 +30,6 @@ public class EmptyWatcher implements Watcher {
public static final EmptyWatcher instance = new EmptyWatcher();
private EmptyWatcher() {}

@Override
public void process(WatchedEvent event) {}
}
@ -41,6 +41,7 @@ import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
@ -159,7 +160,7 @@ public class HQuorumPeer {
}

File myIdFile = new File(dataDir, "myid");
PrintWriter w = new PrintWriter(myIdFile);
PrintWriter w = new PrintWriter(myIdFile, StandardCharsets.UTF_8.name());
w.println(myId);
w.close();
}
@ -43,6 +43,7 @@ import org.apache.zookeeper.KeeperException;
public class ZKLeaderManager extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(ZKLeaderManager.class);

private final Object lock = new Object();
private final AtomicBoolean leaderExists = new AtomicBoolean();
private String leaderZNode;
private byte[] nodeId;
@ -85,14 +86,14 @@ public class ZKLeaderManager extends ZooKeeperListener {

private void handleLeaderChange() {
try {
synchronized(leaderExists) {
synchronized(lock) {
if (ZKUtil.watchAndCheckExists(watcher, leaderZNode)) {
LOG.info("Found new leader for znode: "+leaderZNode);
leaderExists.set(true);
} else {
LOG.info("Leader change, but no new leader found");
leaderExists.set(false);
leaderExists.notifyAll();
lock.notifyAll();
}
}
} catch (KeeperException ke) {
@ -136,10 +137,10 @@ public class ZKLeaderManager extends ZooKeeperListener {
}

// wait for next chance
synchronized(leaderExists) {
synchronized(lock) {
while (leaderExists.get() && !candidate.isStopped()) {
try {
leaderExists.wait();
lock.wait();
} catch (InterruptedException ie) {
LOG.debug("Interrupted waiting on leader", ie);
}
@ -153,7 +154,7 @@ public class ZKLeaderManager extends ZooKeeperListener {
*/
public void stepDownAsLeader() {
try {
synchronized(leaderExists) {
synchronized(lock) {
if (!leaderExists.get()) {
return;
}
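Note: findbugs (JLM_JSR166_UTILCONCURRENT_MONITORENTER) flags synchronizing on a java.util.concurrent object such as AtomicBoolean, since atomics are meant for lock-free access, not for use as monitors. The fix introduces a dedicated lock object and moves the wait/notifyAll handshake onto it, while the AtomicBoolean keeps carrying the flag. The resulting idiom, sketched:

    private final Object lock = new Object();
    private final AtomicBoolean leaderExists = new AtomicBoolean();

    void waitForLeaderChange() throws InterruptedException {
      synchronized (lock) {
        while (leaderExists.get()) {
          lock.wait(); // woken by lock.notifyAll() when the flag is cleared
        }
      }
    }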
@ -19,11 +19,14 @@
package org.apache.hadoop.hbase.zookeeper;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
@ -1954,9 +1957,11 @@ public class ZKUtil {
socket.connect(sockAddr, timeout);

socket.setSoTimeout(timeout);
PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
BufferedReader in = new BufferedReader(new InputStreamReader(
socket.getInputStream()));
PrintWriter out = new PrintWriter(new BufferedWriter(
new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)),
true);
BufferedReader in = new BufferedReader(
new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8));
out.println("stat");
out.flush();
ArrayList<String> res = new ArrayList<String>();
@ -19,6 +19,9 @@ package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.io.compress.Compression;
@ -77,7 +80,8 @@ public class TestHColumnDescriptor {
public void testHColumnDescriptorShouldThrowIAEWhenFamiliyNameEmpty()
throws Exception {
try {
new HColumnDescriptor("".getBytes());
new HColumnDescriptor("".getBytes(StandardCharsets.UTF_8));
fail("Did not throw");
} catch (IllegalArgumentException e) {
assertEquals("Family name can not be empty", e.getLocalizedMessage());
}
@ -100,7 +100,7 @@ public class TestHTableDescriptor {
assertEquals(v, deserializedHtd.getMaxFileSize());
assertTrue(deserializedHtd.isReadOnly());
assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
assertEquals(deserializedHtd.getRegionReplication(), 2);
assertEquals(2, deserializedHtd.getRegionReplication());
}

/**
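Note: many of the test fixes in this commit are the same pattern: JUnit's assertEquals(expected, actual) takes the expected value first, and swapping the arguments yields a misleading failure message while passing or failing identically. With the descriptor above, if getRegionReplication() returned 3:

    assertEquals(2, deserializedHtd.getRegionReplication()); // fails with: expected:<2> but was:<3>
    assertEquals(deserializedHtd.getRegionReplication(), 2); // fails with: expected:<3> but was:<2>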
@ -62,7 +62,7 @@ public class TestInterfaceAudienceAnnotations {
private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class);

/** Selects classes with generated in their package name */
class GeneratedClassFilter implements ClassFinder.ClassFilter {
static class GeneratedClassFilter implements ClassFinder.ClassFilter {
@Override
public boolean isCandidateClass(Class<?> c) {
return c.getPackage().getName().contains("generated");
@ -181,7 +181,7 @@ public class TestInterfaceAudienceAnnotations {
}

/** Selects classes that are declared public */
class PublicClassFilter implements ClassFinder.ClassFilter {
static class PublicClassFilter implements ClassFinder.ClassFilter {
@Override
public boolean isCandidateClass(Class<?> c) {
int mod = c.getModifiers();
@ -190,7 +190,7 @@ public class TestInterfaceAudienceAnnotations {
}

/** Selects paths (jars and class dirs) only from the main code, not test classes */
class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter {
static class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter {
@Override
public boolean isCandidatePath(String resourcePath, boolean isJar) {
return !resourcePath.contains("test-classes") &&
@ -207,7 +207,7 @@ public class TestInterfaceAudienceAnnotations {
* - enclosing class is not an interface
* - name starts with "__CLR"
*/
class CloverInstrumentationFilter implements ClassFinder.ClassFilter {
static class CloverInstrumentationFilter implements ClassFinder.ClassFilter {
@Override
public boolean isCandidateClass(Class<?> clazz) {
boolean clover = false;
@ -24,8 +24,10 @@ import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
@ -93,10 +95,10 @@ public class TestAsyncProcess {
private final static Log LOG = LogFactory.getLog(TestAsyncProcess.class);
private static final TableName DUMMY_TABLE =
TableName.valueOf("DUMMY_TABLE");
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes();
private static final byte[] FAILS = "FAILS".getBytes();
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes(StandardCharsets.UTF_8);
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes(StandardCharsets.UTF_8);
private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes(StandardCharsets.UTF_8);
private static final byte[] FAILS = "FAILS".getBytes(StandardCharsets.UTF_8);
private static final Configuration conf = new Configuration();

private static ServerName sn = ServerName.valueOf("s1:1,1");
@ -353,7 +355,8 @@ public class TestAsyncProcess {
return inc.getAndIncrement();
}
}
class MyAsyncProcessWithReplicas extends MyAsyncProcess {

static class MyAsyncProcessWithReplicas extends MyAsyncProcess {
private Set<byte[]> failures = new TreeSet<byte[]>(new Bytes.ByteArrayComparator());
private long primarySleepMs = 0, replicaSleepMs = 0;
private Map<ServerName, Long> customPrimarySleepMs = new HashMap<ServerName, Long>();
@ -625,7 +628,13 @@ public class TestAsyncProcess {
Random rn = new Random();
final long limit = 10 * 1024 * 1024;
final int requestCount = 1 + (int) (rn.nextDouble() * 3);
long putsHeapSize = Math.abs(rn.nextLong()) % limit;
long n = rn.nextLong();
if (n < 0) {
n = -n;
} else if (n == 0) {
n = 1;
}
long putsHeapSize = n % limit;
long maxHeapSizePerRequest = putsHeapSize / requestCount;
LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest +
", putsHeapSize=" + putsHeapSize);
|
||||
final AsyncRequestFuture ars = ap.submit(DUMMY_TABLE, puts, false, cb, false);
|
||||
Assert.assertTrue(puts.isEmpty());
|
||||
ars.waitUntilDone();
|
||||
Assert.assertEquals(updateCalled.get(), 1);
|
||||
Assert.assertEquals(1, updateCalled.get());
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -759,12 +768,12 @@ public class TestAsyncProcess {
|
||||
puts.add(createPut(1, true));
|
||||
|
||||
for (int i = 0; i != ap.maxConcurrentTasksPerRegion; ++i) {
|
||||
ap.incTaskCounters(Arrays.asList(hri1.getRegionName()), sn);
|
||||
ap.incTaskCounters(Collections.singletonList(hri1.getRegionName()), sn);
|
||||
}
|
||||
ap.submit(DUMMY_TABLE, puts, false, null, false);
|
||||
Assert.assertEquals(puts.size(), 1);
|
||||
|
||||
ap.decTaskCounters(Arrays.asList(hri1.getRegionName()), sn);
|
||||
ap.decTaskCounters(Collections.singletonList(hri1.getRegionName()), sn);
|
||||
ap.submit(DUMMY_TABLE, puts, false, null, false);
|
||||
Assert.assertEquals(0, puts.size());
|
||||
}
|
||||
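Note: for a single element, Collections.singletonList(x) is the idiom error-prone suggests over Arrays.asList(x): it avoids allocating a backing array and is explicitly immutable. It also sidesteps a classic varargs trap: since byte[] is not an Object[], Arrays.asList(someByteArray) already produced a one-element List<byte[]> here, but the singletonList form says so directly:

    List<byte[]> regions = Collections.singletonList(hri1.getRegionName()); // exactly one region name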
@ -945,7 +954,7 @@ public class TestAsyncProcess {
final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);

for (int i = 0; i < 1000; i++) {
ap.incTaskCounters(Arrays.asList("dummy".getBytes()), sn);
ap.incTaskCounters(Collections.singletonList("dummy".getBytes(StandardCharsets.UTF_8)), sn);
}

final Thread myThread = Thread.currentThread();
@ -976,7 +985,7 @@ public class TestAsyncProcess {
public void run() {
Threads.sleep(sleepTime);
while (ap.tasksInProgress.get() > 0) {
ap.decTaskCounters(Arrays.asList("dummy".getBytes()), sn);
ap.decTaskCounters(Collections.singletonList("dummy".getBytes(StandardCharsets.UTF_8)), sn);
}
}
};
@ -1336,13 +1345,13 @@ public class TestAsyncProcess {
} catch (RetriesExhaustedException expected) {
}

Assert.assertEquals(res[0], success);
Assert.assertEquals(res[1], success);
Assert.assertEquals(res[2], success);
Assert.assertEquals(res[3], success);
Assert.assertEquals(res[4], failure);
Assert.assertEquals(res[5], success);
Assert.assertEquals(res[6], failure);
Assert.assertEquals(success, res[0]);
Assert.assertEquals(success, res[1]);
Assert.assertEquals(success, res[2]);
Assert.assertEquals(success, res[3]);
Assert.assertEquals(failure, res[4]);
Assert.assertEquals(success, res[5]);
Assert.assertEquals(failure, res[6]);
}
@Test
public void testErrorsServers() throws IOException {
@ -1479,7 +1488,7 @@ public class TestAsyncProcess {

ht.batch(gets, new Object[gets.size()]);

Assert.assertEquals(ap.nbActions.get(), NB_REGS);
Assert.assertEquals(NB_REGS, ap.nbActions.get());
Assert.assertEquals("1 multi response per server", 2, ap.nbMultiResponse.get());
Assert.assertEquals("1 thread per server", 2, con.nbThreads.get());

@ -1487,7 +1496,7 @@ public class TestAsyncProcess {
for (int i =0; i<NB_REGS; i++){
if (con.usedRegions[i]) nbReg++;
}
Assert.assertEquals("nbReg=" + nbReg, nbReg, NB_REGS);
Assert.assertEquals("nbReg=" + nbReg, NB_REGS, nbReg);
}

@Test
@ -125,8 +125,8 @@ public class TestClientExponentialBackoff {

update(stats, 0, 98, 0);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertEquals("We should be using max backoff when at high watermark", backoffTime,
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
assertEquals("We should be using max backoff when at high watermark",
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoffTime);
}

@Test
@ -149,8 +149,8 @@ public class TestClientExponentialBackoff {

update(stats, 0, 0, 100);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertEquals("under heavy compaction pressure", backoffTime,
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
assertEquals("under heavy compaction pressure",
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoffTime);
}

private void update(ServerStatistics stats, int load) {
@ -30,6 +30,7 @@ import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@ -127,7 +128,8 @@ public class TestClientScanner {
@SuppressWarnings("unchecked")
public void testNoResultsHint() throws IOException {
final Result[] results = new Result[1];
KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv1 = new KeyValue("row".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
results[0] = Result.create(new Cell[] {kv1});

@ -188,7 +190,8 @@ public class TestClientScanner {
@SuppressWarnings("unchecked")
public void testSizeLimit() throws IOException {
final Result[] results = new Result[1];
KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv1 = new KeyValue("row".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
results[0] = Result.create(new Cell[] {kv1});

@ -246,9 +249,14 @@
@Test
@SuppressWarnings("unchecked")
public void testCacheLimit() throws IOException {
KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv1 = new KeyValue("row1".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum),
kv2 = new KeyValue("row2".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum),
kv3 = new KeyValue("row3".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
final Result[] results = new Result[] {Result.create(new Cell[] {kv1}),
Result.create(new Cell[] {kv2}), Result.create(new Cell[] {kv3})};
@ -322,7 +330,8 @@ public class TestClientScanner {
@SuppressWarnings("unchecked")
public void testNoMoreResults() throws IOException {
final Result[] results = new Result[1];
KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv1 = new KeyValue("row".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
results[0] = Result.create(new Cell[] {kv1});

@ -381,12 +390,14 @@ public class TestClientScanner {
@SuppressWarnings("unchecked")
public void testMoreResults() throws IOException {
final Result[] results1 = new Result[1];
KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv1 = new KeyValue("row".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
results1[0] = Result.create(new Cell[] {kv1});

final Result[] results2 = new Result[1];
KeyValue kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1,
KeyValue kv2 = new KeyValue("row2".getBytes(StandardCharsets.UTF_8),
"cf".getBytes(StandardCharsets.UTF_8), "cq".getBytes(StandardCharsets.UTF_8), 1,
Type.Maximum);
results2[0] = Result.create(new Cell[] {kv2});
@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;

import static org.junit.Assert.*;

import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.HRegionInfo;
@ -33,8 +34,8 @@ public class TestDelayingRunner {

private static final TableName DUMMY_TABLE =
TableName.valueOf("DUMMY_TABLE");
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes(StandardCharsets.UTF_8);
private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes(StandardCharsets.UTF_8);
private static HRegionInfo hri1 =
new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);

@ -30,6 +30,7 @@ import org.junit.Test;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@ -92,13 +93,14 @@ public class TestOperation {

private static String COL_NAME_1 = "col1";
private static ColumnPrefixFilter COL_PRE_FILTER =
new ColumnPrefixFilter(COL_NAME_1.getBytes());
new ColumnPrefixFilter(COL_NAME_1.getBytes(StandardCharsets.UTF_8));
private static String STR_COL_PRE_FILTER =
COL_PRE_FILTER.getClass().getSimpleName() + " " + COL_NAME_1;

private static String COL_NAME_2 = "col2";
private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter(
COL_NAME_1.getBytes(), true, COL_NAME_2.getBytes(), false);
COL_NAME_1.getBytes(StandardCharsets.UTF_8), true,
COL_NAME_2.getBytes(StandardCharsets.UTF_8), false);
private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName()
+ " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")";

@ -117,25 +119,28 @@ public class TestOperation {

private static String STOP_ROW_KEY = "stop";
private static InclusiveStopFilter IS_FILTER =
new InclusiveStopFilter(STOP_ROW_KEY.getBytes());
new InclusiveStopFilter(STOP_ROW_KEY.getBytes(StandardCharsets.UTF_8));
private static String STR_IS_FILTER =
IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY;

private static String PREFIX = "prefix";
private static PrefixFilter PREFIX_FILTER =
new PrefixFilter(PREFIX.getBytes());
new PrefixFilter(PREFIX.getBytes(StandardCharsets.UTF_8));
private static String STR_PREFIX_FILTER = "PrefixFilter " + PREFIX;

private static byte[][] PREFIXES = {
"0".getBytes(), "1".getBytes(), "2".getBytes()};
"0".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8),
"2".getBytes(StandardCharsets.UTF_8)};
private static MultipleColumnPrefixFilter MCP_FILTER =
new MultipleColumnPrefixFilter(PREFIXES);
private static String STR_MCP_FILTER =
MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]";

private static byte[][] L_PREFIXES = {
"0".getBytes(), "1".getBytes(), "2".getBytes(), "3".getBytes(),
"4".getBytes(), "5".getBytes(), "6".getBytes(), "7".getBytes()};
"0".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8),
"2".getBytes(StandardCharsets.UTF_8), "3".getBytes(StandardCharsets.UTF_8),
"4".getBytes(StandardCharsets.UTF_8), "5".getBytes(StandardCharsets.UTF_8),
"6".getBytes(StandardCharsets.UTF_8), "7".getBytes(StandardCharsets.UTF_8)};
private static MultipleColumnPrefixFilter L_MCP_FILTER =
new MultipleColumnPrefixFilter(L_PREFIXES);
private static String STR_L_MCP_FILTER =
@ -165,7 +170,7 @@ public class TestOperation {
FIRST_KEY_ONLY_FILTER.getClass().getSimpleName();

private static CompareOp CMP_OP = CompareOp.EQUAL;
private static byte[] CMP_VALUE = "value".getBytes();
private static byte[] CMP_VALUE = "value".getBytes(StandardCharsets.UTF_8);
private static BinaryComparator BC = new BinaryComparator(CMP_VALUE);
private static DependentColumnFilter DC_FILTER =
new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC);
@ -449,4 +454,3 @@ public class TestOperation {
}

}
@ -68,7 +68,7 @@ public class TestSnapshotFromAdmin {
ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime;
}
// the correct wait time, capping at the maxTime/tries + fudge room
final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300;
final long time = pauseTime * 3L + ((maxWaitTime / numRetries) * 3) + 300;
assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time "
+ "- further testing won't prove anything.", time < ignoreExpectedTime);
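Note: assuming pauseTime is declared as an int (otherwise the 3L suffix would change nothing), pauseTime * 3 is computed in int arithmetic and only then widened to long, so a large value would overflow silently before the assignment; pauseTime * 3L forces the multiplication itself into long. The effect in isolation:

    int pause = 800_000_000;
    long bad = pause * 3;   // int overflow first: -1894967296
    long good = pause * 3L; // 2400000000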
@ -32,6 +32,7 @@ import com.google.common.base.Strings;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
@ -100,8 +101,10 @@ public class TestHBaseSaslRpcClient {
@Test
public void testSaslClientCallbackHandler() throws UnsupportedCallbackException {
final Token<? extends TokenIdentifier> token = createTokenMock();
when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes());
when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes());
when(token.getIdentifier())
.thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8));
when(token.getPassword())
.thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8));

final NameCallback nameCallback = mock(NameCallback.class);
final PasswordCallback passwordCallback = mock(PasswordCallback.class);
@ -120,8 +123,10 @@ public class TestHBaseSaslRpcClient {
@Test
public void testSaslClientCallbackHandlerWithException() {
final Token<? extends TokenIdentifier> token = createTokenMock();
when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes());
when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes());
when(token.getIdentifier())
.thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8));
when(token.getPassword())
.thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8));
final SaslClientCallbackHandler saslClCallbackHandler = new SaslClientCallbackHandler(token);
try {
saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) });
@ -291,8 +296,10 @@ public class TestHBaseSaslRpcClient {
throws IOException {
Token<? extends TokenIdentifier> token = createTokenMock();
if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) {
when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes());
when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes());
when(token.getIdentifier())
.thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8));
when(token.getPassword())
.thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8));
}
return token;
}
@ -23,7 +23,6 @@ import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -51,7 +50,7 @@ public class TestZKUtil {
String node = "/hbase/testUnsecure";
ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
List<ACL> aclList = ZKUtil.createACL(watcher, node, false);
Assert.assertEquals(aclList.size(), 1);
Assert.assertEquals(1, aclList.size());
Assert.assertTrue(aclList.contains(Ids.OPEN_ACL_UNSAFE.iterator().next()));
}

@ -62,7 +61,7 @@ public class TestZKUtil {
String node = "/hbase/testSecuritySingleSuperuser";
ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
Assert.assertEquals(aclList.size(), 2); // 1+1, since ACL will be set for the creator by default
Assert.assertEquals(2, aclList.size()); // 1+1, since ACL will be set for the creator by default
Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
Assert.assertTrue(aclList.contains(Ids.CREATOR_ALL_ACL.iterator().next()));
}
@ -74,7 +73,7 @@ public class TestZKUtil {
String node = "/hbase/testCreateACL";
ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
Assert.assertEquals(aclList.size(), 4); // 3+1, since ACL will be set for the creator by default
Assert.assertEquals(4, aclList.size()); // 3+1, since ACL will be set for the creator by default
Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group2"))));
Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
@ -90,13 +89,14 @@ public class TestZKUtil {
String node = "/hbase/testCreateACL";
ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
Assert.assertEquals(aclList.size(), 3); // 3, since service user the same as one of superuser
Assert.assertEquals(3, aclList.size()); // 3, since service user the same as one of superuser
Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", ""))));
Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user5"))));
Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user6"))));
}

@Test
public void testInterruptedDuringAction()
throws ZooKeeperConnectionException, IOException, KeeperException, InterruptedException {
final RecoverableZooKeeper recoverableZk = Mockito.mock(RecoverableZooKeeper.class);