HBASE-7508 Fix simple findbugs

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1430169 13f79535-47bb-0310-9956-ffa450edef68
nkeywal  2013-01-08 08:31:11 +00:00
parent 176ddb066c
commit 4e0af48fe4
20 changed files with 80 additions and 56 deletions

findbugs-exclude.xml

@@ -36,6 +36,11 @@
     <Package name="org.apache.hadoop.hbase.coprocessor.example.generated"/>
   </Match>
+  <Match>
+    <Package name="org.apache.hadoop.hbase.tmpl"/>
+  </Match>
   <Match>
     <Class name="org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost"/>
     <Or>

test-patch.properties

@@ -19,7 +19,7 @@ MAVEN_OPTS="-Xmx3g"
 # Please update the per-module test-patch.properties if you update this file.
 OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=226
+OK_FINDBUGS_WARNINGS=127
 # Allow two warnings. Javadoc complains about sun.misc.Unsafe use. See HBASE-7457
 OK_JAVADOC_WARNINGS=2

KeyValue.java

@@ -80,37 +80,37 @@ public class KeyValue implements Cell, HeapSize {
   /**
    * Comparator for plain key/values; i.e. non-catalog table key/values.
    */
-  public static KVComparator COMPARATOR = new KVComparator();
+  public static final KVComparator COMPARATOR = new KVComparator();

   /**
    * Comparator for plain key; i.e. non-catalog table key. Works on Key portion
    * of KeyValue only.
    */
-  public static KeyComparator KEY_COMPARATOR = new KeyComparator();
+  public static final KeyComparator KEY_COMPARATOR = new KeyComparator();

   /**
    * A {@link KVComparator} for <code>.META.</code> catalog table
    * {@link KeyValue}s.
    */
-  public static KVComparator META_COMPARATOR = new MetaComparator();
+  public static final KVComparator META_COMPARATOR = new MetaComparator();

   /**
    * A {@link KVComparator} for <code>.META.</code> catalog table
    * {@link KeyValue} keys.
    */
-  public static KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
+  public static final KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();

   /**
    * A {@link KVComparator} for <code>-ROOT-</code> catalog table
    * {@link KeyValue}s.
    */
-  public static KVComparator ROOT_COMPARATOR = new RootComparator();
+  public static final KVComparator ROOT_COMPARATOR = new RootComparator();

   /**
    * A {@link KVComparator} for <code>-ROOT-</code> catalog table
    * {@link KeyValue} keys.
    */
-  public static KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
+  public static final KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();

   /**
    * Get the appropriate row comparator for the specified table.
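Most of this commit is the one-line fix shown above, repeated across classes: a mutable public static field becomes final (the FindBugs pattern suppressed later in this commit, MS_SHOULD_BE_FINAL). A hedged sketch of what a non-final field allows, with a hypothetical caller:

    // Any code could silently swap the shared comparator at runtime:
    KeyValue.COMPARATOR = new KVComparator();   // compiles only while the field is non-final
    // Declaring the field final turns this global mutation into a compile-time error.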

JVM.java

@@ -34,7 +34,6 @@ import java.lang.reflect.Method;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.Shell;

 /**
  * This class is a wrapper for the implementation of
@@ -127,6 +126,7 @@ public class JVM
       ofdc = runUnixMXBeanMethod("getOpenFileDescriptorCount");
       return (ofdc != null ? ofdc.longValue () : -1);
     }
+    InputStream in = null;
     try {
       //need to get the PID number of the process first
       RuntimeMXBean rtmbean = ManagementFactory.getRuntimeMXBean();
@@ -137,7 +137,7 @@ public class JVM
       Process p = Runtime.getRuntime().exec(
           new String[] { "bash", "-c",
               "ls /proc/" + pidhost[0] + "/fdinfo | wc -l" });
-      InputStream in = p.getInputStream();
+      in = p.getInputStream();
       BufferedReader output = new BufferedReader(
           new InputStreamReader(in));
@@ -146,6 +146,14 @@ public class JVM
       return Long.parseLong(openFileDesCount);
     } catch (IOException ie) {
       LOG.warn("Not able to get the number of open file descriptors", ie);
+    } finally {
+      if (in != null){
+        try {
+          in.close();
+        } catch (IOException e) {
+          LOG.warn("Not able to close the InputStream", e);
+        }
+      }
     }
     return -1;
   }
@@ -164,13 +172,14 @@ public class JVM
       mfdc = runUnixMXBeanMethod("getMaxFileDescriptorCount");
       return (mfdc != null ? mfdc.longValue () : -1);
     }
+    InputStream in = null;
     try {
       //using linux bash commands to retrieve info
       Process p = Runtime.getRuntime().exec(
           new String[] { "bash", "-c",
               "ulimit -n" });
-      InputStream in = p.getInputStream();
+      in = p.getInputStream();
       BufferedReader output = new BufferedReader(
           new InputStreamReader(in));
@@ -179,6 +188,14 @@ public class JVM
       return Long.parseLong(maxFileDesCount);
     } catch (IOException ie) {
       LOG.warn("Not able to get the max number of file descriptors", ie);
+    } finally {
+      if (in != null){
+        try {
+          in.close();
+        } catch (IOException e) {
+          LOG.warn("Not able to close the InputStream", e);
+        }
+      }
     }
     return -1;
   }
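Both hunks above are the same close-in-finally fix: `in` is hoisted out of the try block so the finally clause can close it even when reading or parsing fails. On Java 7 or later the idiomatic equivalent is try-with-resources; a minimal sketch, not the committed code:

    Process p = Runtime.getRuntime().exec(new String[] { "bash", "-c", "ulimit -n" });
    try (BufferedReader output =
        new BufferedReader(new InputStreamReader(p.getInputStream()))) {
      String line = output.readLine();           // the stream is closed automatically
      return line == null ? -1 : Long.parseLong(line.trim());
    } catch (IOException ie) {
      LOG.warn("Not able to get the max number of file descriptors", ie);
      return -1;
    }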

ZooKeeperScanPolicyObserver.java

@@ -63,8 +63,8 @@ import org.apache.zookeeper.ZooKeeper;
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
 public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
-  public static String node = "/backup/example/lastbackup";
-  public static String zkkey = "ZK";
+  public static final String node = "/backup/example/lastbackup";
+  public static final String zkkey = "ZK";
   private static final Log LOG = LogFactory.getLog(ZooKeeperScanPolicyObserver.class);

   /**
@@ -93,6 +93,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
    *
    * @return the last know version of the data
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION")
   public byte[] getData() {
     // try at most twice/minute
     if (needSetup && EnvironmentEdgeManager.currentTimeMillis() > lastSetupTry + 30000) {
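REC_CATCH_EXCEPTION fires when catch (Exception e) guards code that declares no checked Exception, since such a catch also traps any RuntimeException. The annotation above marks the broad catch in getData() as deliberate instead of restructuring it. The shape FindBugs objects to, sketched with a hypothetical helper:

    try {
      return readFromZooKeeper();     // hypothetical call; throws no checked Exception
    } catch (Exception e) {           // also swallows unintended RuntimeExceptions
      LOG.warn("unable to read last backup time", e);
      return null;
    }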

DemoClient.java

@@ -127,7 +127,7 @@ public class DemoClient {
     // Create the demo table with two column families, entry: and unused:
     //
     ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
-    ColumnDescriptor col = null;
+    ColumnDescriptor col;
     col = new ColumnDescriptor();
     col.name = ByteBuffer.wrap(bytes("entry:"));
     col.maxVersions = 10;
@@ -227,7 +227,7 @@ public class DemoClient {
     client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
     printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));

-    Mutation m = null;
+    Mutation m;
     mutations = new ArrayList<Mutation>();
     m = new Mutation();
     m.column = ByteBuffer.wrap(bytes("entry:foo"));
@@ -272,7 +272,7 @@ public class DemoClient {
     }

     List<TCell> result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:foo")), dummyAttributes);
-    if (result.isEmpty() == false) {
+    if (!result.isEmpty()) {
       System.out.println("FATAL: shouldn't get here");
       System.exit(-1);
     }
@@ -305,7 +305,7 @@ public class DemoClient {
     transport.close();
   }

-  private final void printVersions(ByteBuffer row, List<TCell> versions) {
+  private void printVersions(ByteBuffer row, List<TCell> versions) {
     StringBuilder rowStr = new StringBuilder();
     for (TCell cell : versions) {
       rowStr.append(utf8(cell.value.array()));
@@ -314,7 +314,7 @@ public class DemoClient {
     System.out.println("row: " + utf8(row.array()) + ", values: " + rowStr);
   }

-  private final void printRow(TRowResult rowResult) {
+  private void printRow(TRowResult rowResult) {
     // copy values into a TreeMap to get them in sorted order
     TreeMap<String, TCell> sorted = new TreeMap<String, TCell>();
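The dropped `= null` initializers above were dead stores: each variable is assigned a real value before its first read, so the null is never observed, and Java's definite-assignment check still guarantees no read of an uninitialized local. A one-line illustration:

    Mutation m;              // fine: the compiler rejects any read before an assignment
    m = new Mutation();      // first assignment; the old '= null' was never visible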

HealthCheckChore.java

@@ -22,9 +22,6 @@ package org.apache.hadoop.hbase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
 import org.apache.hadoop.util.StringUtils;
@@ -51,7 +48,7 @@ import org.apache.hadoop.util.StringUtils;
     healthChecker.init(healthCheckScript, scriptTimeout);
     this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD,
         HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD);
-    this.failureWindow = this.threshold * sleepTime;
+    this.failureWindow = (long)this.threshold * (long)sleepTime;
   }

   @Override
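For context on the casts above: threshold and sleepTime are ints, so without the casts the multiplication runs in 32-bit arithmetic and can overflow before being widened into the long failureWindow (FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG). A worked example with hypothetical values:

    int threshold = 1000000, sleepTime = 10000;
    long bad  = threshold * sleepTime;          // int math overflows first: 1410065408
    long good = (long) threshold * sleepTime;   // 64-bit math: 10000000000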

ServerName.java

@@ -71,7 +71,7 @@ public class ServerName implements Comparable<ServerName> {
    */
   public static final String SERVERNAME_SEPARATOR = ",";

-  public static Pattern SERVERNAME_PATTERN =
+  public static final Pattern SERVERNAME_PATTERN =
     Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
       SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");

OperationWithAttributes.java

@@ -35,7 +35,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
   private Map<String, byte[]> attributes;

   // used for uniquely identifying an operation
-  static public String ID_ATRIBUTE = "_operation.attributes.id";
+  public static final String ID_ATRIBUTE = "_operation.attributes.id";

   public void setAttribute(String name, byte[] value) {
     if (attributes == null && value == null) {

Scan.java

@@ -95,9 +95,9 @@ public class Scan extends OperationWithAttributes {
   // If application wants to collect scan metrics, it needs to
   // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
-  static public String SCAN_ATTRIBUTES_METRICS_ENABLE =
+  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE =
       "scan.attributes.metrics.enable";
-  static public String SCAN_ATTRIBUTES_METRICS_DATA =
+  static public final String SCAN_ATTRIBUTES_METRICS_DATA =
       "scan.attributes.metrics.data";

   /*

AggregateImplementation.java

@@ -67,7 +67,7 @@ import com.google.protobuf.Service;
 @InterfaceStability.Evolving
 public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message>
   extends AggregateService implements CoprocessorService, Coprocessor {
-  protected static Log log = LogFactory.getLog(AggregateImplementation.class);
+  protected static final Log log = LogFactory.getLog(AggregateImplementation.class);
   private RegionCoprocessorEnvironment env;

   /**

EventHandler.java

@@ -67,7 +67,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
   protected Server server;

   // sequence id generator for default FIFO ordering of events
-  protected static AtomicLong seqids = new AtomicLong(0);
+  protected static final AtomicLong seqids = new AtomicLong(0);

   // sequence id for this event
   private final long seqid;

LruBlockCache.java

@@ -532,6 +532,16 @@ public class LruBlockCache implements BlockCache, HeapSize {
       if(this.overflow() == that.overflow()) return 0;
       return this.overflow() > that.overflow() ? 1 : -1;
     }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null || !(that instanceof BlockBucket)){
+        return false;
+      }
+
+      return compareTo((BlockBucket)that) == 0;
+    }
   }

   /**
@@ -625,13 +635,13 @@ public class LruBlockCache implements BlockCache, HeapSize {
     public void evict() {
       synchronized(this) {
-        this.notify(); // FindBugs NN_NAKED_NOTIFY
+        this.notifyAll(); // FindBugs NN_NAKED_NOTIFY
       }
     }

-    void shutdown() {
+    synchronized void shutdown() {
       this.go = false;
-      interrupt();
+      this.notifyAll();
     }

     /**
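The added equals makes BlockBucket consistent with its compareTo (FindBugs EQ_COMPARETO_USE_OBJECT_EQUALS flags a compareTo without a matching equals). An equals override is normally paired with hashCode as well; a minimal sketch of the fuller pairing, assuming overflow() is the only quantity equality depends on:

    @Override
    public boolean equals(Object that) {
      if (!(that instanceof BlockBucket)) {   // instanceof is false for null too
        return false;
      }
      return compareTo((BlockBucket) that) == 0;
    }

    @Override
    public int hashCode() {
      long o = overflow();                    // hash the same value compareTo orders by
      return (int) (o ^ (o >>> 32));
    }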

HLogInputFormat.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
@@ -49,10 +48,10 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
  */
 @InterfaceAudience.Public
 public class HLogInputFormat extends InputFormat<HLogKey, WALEdit> {
-  private static Log LOG = LogFactory.getLog(HLogInputFormat.class);
+  private static final Log LOG = LogFactory.getLog(HLogInputFormat.class);

-  public static String START_TIME_KEY = "hlog.start.time";
-  public static String END_TIME_KEY = "hlog.end.time";
+  public static final String START_TIME_KEY = "hlog.start.time";
+  public static final String END_TIME_KEY = "hlog.end.time";

   /**
    * {@link InputSplit} for {@link HLog} files. Each split represent

LoadIncrementalHFiles.java

@@ -91,13 +91,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class LoadIncrementalHFiles extends Configured implements Tool {
-  private static Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
-  static AtomicLong regionCount = new AtomicLong(0);
+  private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
+  static final AtomicLong regionCount = new AtomicLong(0);
   private HBaseAdmin hbAdmin;
   private Configuration cfg;

-  public static String NAME = "completebulkload";
-  private static String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers";
+  public static final String NAME = "completebulkload";
+  private static final String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers";
   private boolean assignSeqIds;

   public LoadIncrementalHFiles(Configuration conf) throws Exception {
@@ -626,11 +626,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     }

     HTableDescriptor htd = new HTableDescriptor(tableName);
-    HColumnDescriptor hcd = null;
+    HColumnDescriptor hcd;

     // Add column families
     // Build a set of keys
-    byte[][] keys = null;
+    byte[][] keys;
     TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);

     for (FileStatus stat : familyDirStatuses) {
@@ -667,10 +667,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
           " last=" + Bytes.toStringBinary(last));

       // To eventually infer start key-end key boundaries
-      Integer value = map.containsKey(first)?(Integer)map.get(first):0;
+      Integer value = map.containsKey(first)? map.get(first):0;
       map.put(first, value+1);

-      value = map.containsKey(last)?(Integer)map.get(last):0;
+      value = map.containsKey(last)? map.get(last):0;
       map.put(last, value-1);
     } finally {
       reader.close();

SplitRegionHandler.java

@@ -46,6 +46,7 @@ public class SplitRegionHandler extends EventHandler implements TotesHRegionInfo
   /**
    * For testing only! Set to true to skip handling of split.
    */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
   public static boolean TEST_SKIP = false;

   public SplitRegionHandler(Server server,
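Here the warning is suppressed rather than fixed: TEST_SKIP must stay non-final because, per the comment above, tests flip it at runtime. A hypothetical test usage:

    SplitRegionHandler.TEST_SKIP = true;   // make the handler skip split handling in this test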

KeyPrefixRegionSplitPolicy.java

@@ -34,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
   private static final Log LOG = LogFactory
       .getLog(KeyPrefixRegionSplitPolicy.class);
-  public static String PREFIX_LENGTH_KEY = "prefix_split_key_policy.prefix_length";
+  public static final String PREFIX_LENGTH_KEY = "prefix_split_key_policy.prefix_length";

   private int prefixLength = 0;

Permission.java

@@ -49,8 +49,8 @@ public class Permission extends VersionedWritable {
     public byte code() { return code; }
   }

-  private static Log LOG = LogFactory.getLog(Permission.class);
-  protected static Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
+  private static final Log LOG = LogFactory.getLog(Permission.class);
+  protected static final Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();

   protected Action[] actions;

EmptyWatcher.java

@@ -27,7 +27,7 @@ import org.apache.zookeeper.WatchedEvent;
 @InterfaceAudience.Private
 public class EmptyWatcher implements Watcher {
   // Used in this package but also by tests so needs to be public
-  public static EmptyWatcher instance = new EmptyWatcher();
+  public static final EmptyWatcher instance = new EmptyWatcher();

   private EmptyWatcher() {}

   public void process(WatchedEvent event) {}

pom.xml

@@ -883,7 +883,7 @@
     <maven.antrun.version>1.6</maven.antrun.version>
     <jamon.plugin.version>2.3.4</jamon.plugin.version>
     <findbugs-maven-plugin.version>2.5.2</findbugs-maven-plugin.version>
-    <findbugs.version>2.0.1</findbugs.version> <!-- as the plugin version for safety -->
+    <findbugs-annotations>1.3.9-1</findbugs-annotations>
     <maven.site.version>3.1</maven.site.version>
     <javadoc.version>2.9</javadoc.version>
     <maven.resources.plugin.version>2.5</maven.resources.plugin.version>
@@ -1263,15 +1263,9 @@
       <dependencies>
         <dependency>
-          <groupId>com.google.code.findbugs</groupId>
-          <artifactId>annotations</artifactId>
-          <version>${findbugs.version}</version>
-          <scope>compile</scope>
-        </dependency>
-        <dependency>
-          <groupId>com.google.code.findbugs</groupId>
-          <artifactId>jsr305</artifactId>
-          <version>${findbugs.version}</version>
+          <groupId>com.github.stephenc.findbugs</groupId>
+          <artifactId>findbugs-annotations</artifactId>
+          <version>${findbugs-annotations}</version>
           <scope>compile</scope>
         </dependency>