HBASE-12145 Fix javadoc and findbugs so new folks aren't freaked when they see them
Fix javadoc warnings. Fix up findbugs warnings, mostly by adding annotations saying 'working as expected'.

In RpcRetryingCallerWithReadReplicas, made the following change, which findbugs spotted:

-  if (completed == null) tasks.wait();
+  while (completed == null) tasks.wait();

In RecoverableZooKeeper, made all zk accesses synchronized -- previously we were doing it only half-way.

In RatioBasedCompactionPolicy we were creating a new instance of Random on each invocation of getNextMajorCompactTime; a single instance is now reused, re-seeded under synchronization.
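Why the while loop matters in the RpcRetryingCallerWithReadReplicas change: Object.wait() can return spuriously, and another thread can consume the awaited value between the notify and the moment the waiter reacquires the monitor, so the guard condition has to be re-checked in a loop rather than tested once. The sketch below is illustrative only; the class and field names are invented for this note and are not taken from the HBase source, but it shows the same guarded-wait idiom the patch adopts.

    // Guarded-wait idiom, minimal sketch (hypothetical names, not HBase classes).
    public class SingleSlot<T> {
      private T value; // null until a producer publishes a result

      public synchronized void put(T v) {
        value = v;
        notifyAll(); // wake any waiting consumers
      }

      public synchronized T take() throws InterruptedException {
        // Re-check the condition after every wake-up: wait() can return spuriously,
        // and another consumer may have taken the value between notify and re-lock.
        while (value == null) {
          wait();
        }
        T v = value;
        value = null;
        return v;
      }
    }

With a bare if instead of the while, a spurious or racing wake-up would fall through the guard and hand back a value that is not there yet, which is exactly the condition findbugs flags.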
parent 4ee6a73490
commit 43301167db
@@ -823,7 +823,6 @@ class ConnectionManager {
 
     /**
      * An identifier that will remain the same for a given connection.
-     * @return
      */
     @Override
     public String toString(){
@@ -2485,7 +2484,6 @@ class ConnectionManager {
     /**
      * Connects to the master to get the table descriptor.
      * @param tableName table name
-     * @return
      * @throws IOException if the connection to master fails or if the table
      *  is not found.
      */
@@ -191,7 +191,7 @@ public class HBaseAdmin implements Admin {
 
   /**
    * Constructor.
-   * See {@link #HBaseAdmin(HConnection connection)}
+   * See {@link #HBaseAdmin(Connection connection)}
    *
    * @param c Configuration object. Copied internally.
    */
@@ -210,10 +210,10 @@ public class HBaseAdmin implements Admin {
 
 
   /**
-   * Constructor for externally managed HConnections.
+   * Constructor for externally managed Connections.
    * The connection to master will be created when required by admin functions.
    *
-   * @param connection The HConnection instance to use
+   * @param connection The Connection instance to use
    * @throws MasterNotRunningException, ZooKeeperConnectionException are not
    *  thrown anymore but kept into the interface for backward api compatibility
    * @deprecated Do not use this internal ctor.
@@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
  * Similar to {@link RegionServerCallable} but for the AdminService interface. This service callable
  * assumes a Table and row and thus does region locating similar to RegionServerCallable.
  */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD",
+  justification="stub used by ipc")
 public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<T> {
 
   protected final ClusterConnection connection;
@@ -378,6 +378,8 @@ public class RpcRetryingCallerWithReadReplicas {
       }
     }
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE",
+      justification="Is this an issue?")
     @Override
     public Result get(long timeout, TimeUnit unit)
         throws InterruptedException, ExecutionException, TimeoutException {
@@ -390,7 +392,7 @@ public class RpcRetryingCallerWithReadReplicas {
         }
         unit.timedWait(tasks, timeout);
       }
-
+      // Findbugs says this null check is redundant. Will result be set across the wait above?
       if (result != null) {
         return result;
       }
@@ -398,7 +400,7 @@ public class RpcRetryingCallerWithReadReplicas {
         throw exeEx;
       }
 
-      throw new TimeoutException();
+      throw new TimeoutException("timeout=" + timeout + ", " + unit);
     }
   }
 
@@ -416,7 +418,7 @@ public class RpcRetryingCallerWithReadReplicas {
 
     public QueueingFuture take() throws InterruptedException {
       synchronized (tasks) {
-        if (completed == null) tasks.wait();
+        while (completed == null) tasks.wait();
      }
       return completed;
     }
@@ -18,17 +18,8 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.Message;
-import com.google.protobuf.Parser;
-import com.google.protobuf.RpcChannel;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
-import com.google.protobuf.TextFormat;
+import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -45,6 +36,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableSet;
 import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -56,7 +48,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -96,7 +87,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
@@ -127,13 +117,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
+import org.apache.hadoop.hbase.quotas.QuotaType;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
@@ -151,27 +142,23 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableSet;
-
-import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Message;
+import com.google.protobuf.Parser;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
 
 /**
  * Protobufs utility.
 */
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED",
+  justification="None. Address sometime.")
 public final class ProtobufUtil {
 
   private ProtobufUtil() {
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -391,8 +390,14 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
     if (peer == null) {
       return false;
     }
+    ReplicationPeerZKImpl previous =
       ((ConcurrentMap<String, ReplicationPeerZKImpl>) peerClusters).putIfAbsent(peerId, peer);
-    LOG.info("Added new peer cluster " + peer.getPeerConfig().getClusterKey());
+    if (previous == null) {
+      LOG.info("Added new peer cluster=" + peer.getPeerConfig().getClusterKey());
+    } else {
+      LOG.info("Peer already present, " + previous.getPeerConfig().getClusterKey() +
+        ", new cluster=" + peer.getPeerConfig().getClusterKey());
+    }
     return true;
   }
 
@@ -105,6 +105,8 @@ public class RecoverableZooKeeper {
         null);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+    justification="None. Its always been this way.")
   public RecoverableZooKeeper(String quorumServers, int sessionTimeout,
       Watcher watcher, int maxRetries, int retryIntervalMillis, String identifier)
       throws IOException {
@@ -690,23 +692,23 @@ public class RecoverableZooKeeper {
     return newData;
   }
 
-  public long getSessionId() {
-    return zk == null ? null : zk.getSessionId();
+  public synchronized long getSessionId() {
+    return zk == null ? -1 : zk.getSessionId();
   }
 
-  public void close() throws InterruptedException {
+  public synchronized void close() throws InterruptedException {
     if (zk != null) zk.close();
   }
 
-  public States getState() {
+  public synchronized States getState() {
     return zk == null ? null : zk.getState();
   }
 
-  public ZooKeeper getZooKeeper() {
+  public synchronized ZooKeeper getZooKeeper() {
     return zk;
   }
 
-  public byte[] getSessionPasswd() {
+  public synchronized byte[] getSessionPasswd() {
     return zk == null ? null : zk.getSessionPasswd();
   }
 
@@ -44,7 +44,8 @@ public class PrettyPrinter {
     return human.toString();
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG",
+    justification="Will not overflow")
   private static String humanReadableTTL(final long interval){
     StringBuilder sb = new StringBuilder();
     int days, hours, minutes, seconds;
@@ -45,7 +45,6 @@ public class PrefixTreeSeeker implements EncodedSeeker {
   protected ByteBuffer block;
   protected boolean includeMvccVersion;
   protected PrefixTreeArraySearcher ptSearcher;
-  protected boolean movedToPrevious = false;
 
   public PrefixTreeSeeker(boolean includeMvccVersion) {
     this.includeMvccVersion = includeMvccVersion;
@@ -28,13 +28,12 @@ import java.nio.ByteBuffer;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -62,7 +61,7 @@ import com.google.common.base.Preconditions;
  * information from the block index are required to read a block.
  * <li>In version 2 a block is structured as follows:
  * <ul>
- * <li>header (see {@link Writer#finishBlock()})
+ * <li>header (see Writer#finishBlock())
  * <ul>
  * <li>Magic record identifying the block type (8 bytes)
 * <li>Compressed block size, excluding header, including checksum (4 bytes)
@@ -538,7 +538,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     @Override
     public void processHistogram(MetricName name, Histogram histogram, PrintStream stream) {
       super.processHistogram(name, histogram, stream);
-      stream.printf(Locale.getDefault(), " count = %d\n", histogram.count());
+      stream.printf(Locale.getDefault(), " count = %d%n", histogram.count());
     }
   }
 
@@ -218,6 +218,8 @@ public class HFileOutputFormat2
    * @return A WriterLength, containing a new StoreFile.Writer.
    * @throws IOException
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED",
+    justification="Not important")
   private WriterLength getNewWriter(byte[] family, Configuration conf)
       throws IOException {
     WriterLength wl = new WriterLength();
@@ -107,9 +107,6 @@ extends InputFormat<ImmutableBytesWritable, Result> {
   private HashMap<InetAddress, String> reverseDNSCacheMap =
       new HashMap<InetAddress, String>();
 
-  /** The NameServer address */
-  private String nameServer = null;
-
   /**
    * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses
    * the default.
@@ -161,9 +158,6 @@ extends InputFormat<ImmutableBytesWritable, Result> {
     if (table == null) {
       throw new IOException("No table was provided.");
     }
-    // Get the name server address and the default value is null.
-    this.nameServer =
-      context.getConfiguration().get("hbase.nameserver.address", null);
 
     RegionSizeCalculator sizeCalculator = new RegionSizeCalculator((HTable) table);
 
@@ -278,7 +272,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
   /**
    * Allows subclasses to get the {@link HTable}.
    *
-   * @deprecated Use {@link #getTable()} and {@link #getRegionLocator()} instead.
+   * @deprecated
    */
   @Deprecated
   protected HTable getHTable() {
@@ -288,7 +282,7 @@ extends InputFormat<ImmutableBytesWritable, Result> {
   /**
    * Allows subclasses to set the {@link HTable}.
    *
-   * @param table The {@link HTable} to get the data from.
+   * @param table The table to get the data from.
    * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
    */
   @Deprecated
@@ -55,7 +55,7 @@ extends RecordReader<ImmutableBytesWritable, Result> {
   /**
    * Sets the HBase table.
    *
-   * @param htable The {@link HTable} to scan.
+   * @param htable The table to scan.
    * @deprecated Use setTable() instead.
    */
   @Deprecated
@@ -540,6 +540,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
     }
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NM_FIELD_NAMING_CONVENTION",
+      justification="Mistake. Too disruptive to change now")
     public static Action NullAction = new Action(Type.NULL);
 
     public void doAction(Action action) {
@@ -785,6 +787,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
     };
 
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SBSC_USE_STRINGBUFFER_CONCATENATION",
+      justification="Not important but should be fixed")
     @Override
     public String toString() {
       String desc = "Cluster{" +
@@ -405,6 +405,8 @@ public class CompactSplitThread implements CompactionRequestor {
     return this.regionSplitLimit;
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
+    justification="Contrived use of compareTo")
   private class CompactionRunner implements Runnable, Comparable<CompactionRunner> {
     private final Store store;
     private final HRegion region;
@@ -29,8 +29,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Public
 @Deprecated
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS",
+  justification="Temporary glue. To be removed")
 public class RowTooBigException extends org.apache.hadoop.hbase.client.RowTooBigException {
 
   public RowTooBigException(String message) {
     super(message);
   }
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
@@ -27,13 +27,12 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
@@ -321,6 +320,15 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
     return result;
   }
 
+  /**
+   * Used calculation jitter
+   */
+  private final Random random = new Random();
+
+  /**
+   * @param filesToCompact
+   * @return When to run next major compaction
+   */
   public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
     // default = 24hrs
     long ret = comConf.getMajorCompactionPeriod();
@@ -332,10 +340,15 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy {
       // deterministic jitter avoids a major compaction storm on restart
       Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
       if (seed != null) {
-        double rnd = (new Random(seed)).nextDouble();
+        // Synchronized to ensure one user of random instance at a time.
+        double rnd = -1;
+        synchronized (this) {
+          this.random.setSeed(seed);
+          rnd = this.random.nextDouble();
+        }
         ret += jitter - Math.round(2L * jitter * rnd);
       } else {
-        ret = 0; // no storefiles == no major compaction
+        ret = 0; // If seed is null, then no storefiles == no major compaction
       }
     }
   }
@@ -735,6 +735,8 @@ public class ReplicationSource extends Thread
    * @return true if we're done with the current file, false if we should
    * continue trying to read from it
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+    justification="Yeah, this is how it works")
   protected boolean processEndOfFile() {
     if (this.queue.size() != 0) {
       if (LOG.isTraceEnabled()) {
@@ -220,6 +220,8 @@ public class SchemaResource extends ResourceBase {
     return update(model, false, uriInfo);
   }
 
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+    justification="Expected")
   @DELETE
   public Response delete(final @Context UriInfo uriInfo) {
     if (LOG.isDebugEnabled()) {
@@ -144,7 +144,7 @@ public class RegionSplitter {
    * {@link HexStringSplit} to partition their table and set it as default, but
    * provided this for your custom algorithm. To use, create a new derived class
    * from this interface and call {@link RegionSplitter#createPresplitTable} or
-   * {@link RegionSplitter#rollingSplit(String, SplitAlgorithm, Configuration)} with the
+   * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the
    * argument splitClassName giving the name of your class.
    */
   public interface SplitAlgorithm {