diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 80e22b9d67a..43a7973c5b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -823,7 +823,6 @@ class ConnectionManager { /** * An identifier that will remain the same for a given connection. - * @return */ @Override public String toString(){ @@ -2485,7 +2484,6 @@ class ConnectionManager { /** * Connects to the master to get the table descriptor. * @param tableName table name - * @return * @throws IOException if the connection to master fails or if the table * is not found. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 4c7f8805969..b2cd75577f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -191,7 +191,7 @@ public class HBaseAdmin implements Admin { /** * Constructor. - * See {@link #HBaseAdmin(HConnection connection)} + * See {@link #HBaseAdmin(Connection connection)} * * @param c Configuration object. Copied internally. */ @@ -210,10 +210,10 @@ public class HBaseAdmin implements Admin { /** - * Constructor for externally managed HConnections. + * Constructor for externally managed Connections. * The connection to master will be created when required by admin functions. * - * @param connection The HConnection instance to use + * @param connection The Connection instance to use * @throws MasterNotRunningException, ZooKeeperConnectionException are not * thrown anymore but kept into the interface for backward api compatibility * @deprecated Do not use this internal ctor. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index d114304b682..a1353844d53 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; * Similar to {@link RegionServerCallable} but for the AdminService interface. This service callable * assumes a Table and row and thus does region locating similar to RegionServerCallable. 
*/ +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD", + justification="stub used by ipc") public abstract class RegionAdminServiceCallable implements RetryingCallable { protected final ClusterConnection connection; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index ca5955685c2..4c5814e6115 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -378,6 +378,8 @@ public class RpcRetryingCallerWithReadReplicas { } } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RCN_REDUNDANT_NULLCHECK_OF_NULL_VALUE", + justification="Is this an issue?") @Override public Result get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { @@ -390,7 +392,7 @@ public class RpcRetryingCallerWithReadReplicas { } unit.timedWait(tasks, timeout); } - + // Findbugs says this null check is redundant. Will result be set across the wait above? if (result != null) { return result; } @@ -398,7 +400,7 @@ public class RpcRetryingCallerWithReadReplicas { throw exeEx; } - throw new TimeoutException(); + throw new TimeoutException("timeout=" + timeout + ", " + unit); } } @@ -416,7 +418,7 @@ public class RpcRetryingCallerWithReadReplicas { public QueueingFuture take() throws InterruptedException { synchronized (tasks) { - if (completed == null) tasks.wait(); + while (completed == null) tasks.wait(); } return completed; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 635daa15ab3..98d3b3ffd24 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -18,17 +18,8 @@ package org.apache.hadoop.hbase.protobuf; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.ListMultimap; -import com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.Message; -import com.google.protobuf.Parser; -import com.google.protobuf.RpcChannel; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; -import com.google.protobuf.TextFormat; +import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.lang.reflect.Constructor; @@ -45,6 +36,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -56,7 +48,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -96,7 +87,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; @@ -127,13 +117,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; -import org.apache.hadoop.hbase.quotas.QuotaType; import org.apache.hadoop.hbase.quotas.QuotaScope; +import org.apache.hadoop.hbase.quotas.QuotaType; import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.TablePermission; @@ -151,27 +142,23 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.ParameterizedType; -import java.lang.reflect.Type; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NavigableSet; - -import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Message; +import com.google.protobuf.Parser; +import com.google.protobuf.RpcChannel; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; +import com.google.protobuf.TextFormat; /** * Protobufs utility. */ +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED", + justification="None. 
Address sometime.") public final class ProtobufUtil { private ProtobufUtil() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index a18d8e8478d..dce090394d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.replication; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -391,8 +390,14 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re if (peer == null) { return false; } - ((ConcurrentMap) peerClusters).putIfAbsent(peerId, peer); - LOG.info("Added new peer cluster " + peer.getPeerConfig().getClusterKey()); + ReplicationPeerZKImpl previous = + ((ConcurrentMap) peerClusters).putIfAbsent(peerId, peer); + if (previous == null) { + LOG.info("Added new peer cluster=" + peer.getPeerConfig().getClusterKey()); + } else { + LOG.info("Peer already present, " + previous.getPeerConfig().getClusterKey() + + ", new cluster=" + peer.getPeerConfig().getClusterKey()); + } return true; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index fc854c6e1be..a72f49c77a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -105,6 +105,8 @@ public class RecoverableZooKeeper { null); } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", + justification="None. It's always been this way.") public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis, String identifier) throws IOException { @@ -690,23 +692,23 @@ public class RecoverableZooKeeper { return newData; } - public long getSessionId() { - return zk == null ? null : zk.getSessionId(); + public synchronized long getSessionId() { + return zk == null ? -1 : zk.getSessionId(); } - public void close() throws InterruptedException { + public synchronized void close() throws InterruptedException { if (zk != null) zk.close(); } - public States getState() { + public synchronized States getState() { return zk == null ? null : zk.getState(); } - public ZooKeeper getZooKeeper() { + public synchronized ZooKeeper getZooKeeper() { return zk; } - public byte[] getSessionPasswd() { + public synchronized byte[] getSessionPasswd() { return zk == null ?
null : zk.getSessionPasswd(); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index 48de281032c..cf11de17da2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -44,7 +44,8 @@ public class PrettyPrinter { return human.toString(); } - + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification="Will not overflow") private static String humanReadableTTL(final long interval){ StringBuilder sb = new StringBuilder(); int days, hours, minutes, seconds; diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java index 006ab29ba01..012b3e5c2f4 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java @@ -45,7 +45,6 @@ public class PrefixTreeSeeker implements EncodedSeeker { protected ByteBuffer block; protected boolean includeMvccVersion; protected PrefixTreeArraySearcher ptSearcher; - protected boolean movedToPrevious = false; public PrefixTreeSeeker(boolean includeMvccVersion) { this.includeMvccVersion = includeMvccVersion; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 246e947b405..6341f2d5a0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -28,13 +28,12 @@ import java.nio.ByteBuffer; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -62,7 +61,7 @@ import com.google.common.base.Preconditions; * information from the block index are required to read a block. *
<li>In version 2 a block is structured as follows: * <ul> - * <li>header (see {@link Writer#finishBlock()}) + * <li>header (see Writer#finishBlock()) * <ul> * <li>Magic record identifying the block type (8 bytes) * <li>Compressed block size, excluding header, including checksum (4 bytes) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index bde6282c6a5..0021cf42c50 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -538,7 +538,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { @Override public void processHistogram(MetricName name, Histogram histogram, PrintStream stream) { super.processHistogram(name, histogram, stream); - stream.printf(Locale.getDefault(), " count = %d\n", histogram.count()); + stream.printf(Locale.getDefault(), " count = %d%n", histogram.count()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index a46ed9f7434..f8f9b4d5770 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -218,6 +218,8 @@ public class HFileOutputFormat2 * @return A WriterLength, containing a new StoreFile.Writer. * @throws IOException */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", + justification="Not important") private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException { WriterLength wl = new WriterLength(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 3988ae30cd3..c196eedc9e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils; * class ExampleTIF extends TableInputFormatBase implements JobConfigurable { * * public void configure(JobConf job) { - * Connection connection = + * Connection connection = * ConnectionFactory.createConnection(HBaseConfiguration.create(job)); * TableName tableName = TableName.valueOf("exampleTable"); * // mandatory @@ -91,7 +91,7 @@ extends InputFormat { final Log LOG = LogFactory.getLog(TableInputFormatBase.class); - /** Holds the details for the internal scanner. + /** Holds the details for the internal scanner. * * @see Scan */ private Scan scan = null; @@ -106,10 +106,7 @@ extends InputFormat { /** The reverse DNS lookup cache mapping: IPAddress => HostName */ private HashMap reverseDNSCacheMap = new HashMap(); - - /** The NameServer address */ - private String nameServer = null; - + /** * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses * the default. @@ -161,9 +158,6 @@ extends InputFormat { if (table == null) { throw new IOException("No table was provided."); } - // Get the name server address and the default value is null.
- this.nameServer = - context.getConfiguration().get("hbase.nameserver.address", null); RegionSizeCalculator sizeCalculator = new RegionSizeCalculator((HTable) table); @@ -229,7 +223,7 @@ extends InputFormat { } return splits; } - + public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException { String hostName = this.reverseDNSCacheMap.get(ipAddress); if (hostName == null) { @@ -277,8 +271,8 @@ extends InputFormat { /** * Allows subclasses to get the {@link HTable}. - * - * @deprecated Use {@link #getTable()} and {@link #getRegionLocator()} instead. + * + * @deprecated */ @Deprecated protected HTable getHTable() { @@ -287,8 +281,8 @@ extends InputFormat { /** * Allows subclasses to set the {@link HTable}. - * - * @param table The {@link HTable} to get the data from. + * + * @param table The table to get the data from. * @deprecated Use {@link #initializeTable(Connection, TableName)} instead. */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 1de7676707b..f46f1e32a04 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -55,7 +55,7 @@ extends RecordReader { /** * Sets the HBase table. * - * @param htable The {@link HTable} to scan. + * @param htable The table to scan. * @deprecated Use setTable() instead. */ @Deprecated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a6f457f9fac..5a7f2346905 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -540,6 +540,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NM_FIELD_NAMING_CONVENTION", + justification="Mistake. 
Too disruptive to change now") public static Action NullAction = new Action(Type.NULL); public void doAction(Action action) { @@ -785,6 +787,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } }; + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SBSC_USE_STRINGBUFFER_CONCATENATION", + justification="Not important but should be fixed") @Override public String toString() { String desc = "Cluster{" + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 792618caad5..b97ebee3119 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -405,6 +405,8 @@ public class CompactSplitThread implements CompactionRequestor { return this.regionSplitLimit; } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", + justification="Contrived use of compareTo") private class CompactionRunner implements Runnable, Comparable { private final Store store; private final HRegion region; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java index 5e9013f8922..4a408e7bd29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RowTooBigException.java @@ -29,9 +29,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; */ @InterfaceAudience.Public @Deprecated +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS", + justification="Temporary glue. 
To be removed") public class RowTooBigException extends org.apache.hadoop.hbase.client.RowTooBigException { - public RowTooBigException(String message) { super(message); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 31d347c27e6..3d4e99027b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Collection; -import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 122eeaf5681..c58ff147f5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -27,13 +27,12 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreUtils; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.base.Preconditions; import com.google.common.base.Predicate; @@ -321,6 +320,15 @@ public class RatioBasedCompactionPolicy extends CompactionPolicy { return result; } + /** + * Used to calculate jitter + */ + private final Random random = new Random(); + /** + * @param filesToCompact + * @return When to run next major compaction + */ public long getNextMajorCompactTime(final Collection filesToCompact) { // default = 24hrs long ret = comConf.getMajorCompactionPeriod(); @@ -332,10 +340,15 @@ // deterministic jitter avoids a major compaction storm on restart Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact); if (seed != null) { - double rnd = (new Random(seed)).nextDouble(); + // Synchronized to ensure one user of random instance at a time.
+ double rnd = -1; + synchronized (this) { + this.random.setSeed(seed); + rnd = this.random.nextDouble(); + } ret += jitter - Math.round(2L * jitter * rnd); } else { - ret = 0; // no storefiles == no major compaction + ret = 0; // If seed is null, then no storefiles == no major compaction } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 2ab036d760d..04a2ad66237 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -735,6 +735,8 @@ public class ReplicationSource extends Thread * @return true if we're done with the current file, false if we should * continue trying to read from it */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", + justification="Yeah, this is how it works") protected boolean processEndOfFile() { if (this.queue.size() != 0) { if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index 0e59c8b3407..45dd9ee7af8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -220,6 +220,8 @@ public class SchemaResource extends ResourceBase { return update(model, false, uriInfo); } + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", + justification="Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { if (LOG.isDebugEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 483da8f7a56..064f67c6dd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -144,7 +144,7 @@ public class RegionSplitter { * {@link HexStringSplit} to partition their table and set it as default, but * provided this for your custom algorithm. To use, create a new derived class * from this interface and call {@link RegionSplitter#createPresplitTable} or - * {@link RegionSplitter#rollingSplit(String, SplitAlgorithm, Configuration)} with the + * RegionSplitter#rollingSplit(TableName, SplitAlgorithm, Configuration) with the * argument splitClassName giving the name of your class. */ public interface SplitAlgorithm {
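
Note on the RpcRetryingCallerWithReadReplicas hunks above: replacing "if (completed == null) tasks.wait();" with a while loop is the standard guarded-wait idiom FindBugs pushes toward. A thread can return from Object.wait() spuriously, or because notify() fired for a different state change, so the guard must be re-checked after every wakeup. The same reasoning answers the question left in the added comment: result can indeed be set across the timedWait, which is why the "redundant" null check is kept and the RCN warning suppressed rather than the check removed. Below is a minimal, self-contained sketch of the idiom; GuardedWait is a hypothetical class for illustration, not HBase code.

// GuardedWait.java -- minimal sketch of the guarded-wait idiom; hypothetical,
// not part of the patch above.
public class GuardedWait {

  private final Object lock = new Object();
  private String result; // guarded by lock

  // Producer side: publish the result and wake all waiters; each waiter
  // re-checks its own guard on wakeup.
  public void complete(String value) {
    synchronized (lock) {
      result = value;
      lock.notifyAll();
    }
  }

  // Consumer side: a plain "if (result == null) lock.wait();" would fall
  // through on a spurious wakeup while result is still null; the while loop
  // re-checks the guard after every return from wait().
  public String take() throws InterruptedException {
    synchronized (lock) {
      while (result == null) {
        lock.wait();
      }
      return result;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final GuardedWait gw = new GuardedWait();
    new Thread(new Runnable() {
      @Override
      public void run() {
        gw.complete("done");
      }
    }).start();
    System.out.println(gw.take()); // prints "done" once the producer runs
  }
}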