HBASE-7508 Fix simple findbugs

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1430169 13f79535-47bb-0310-9956-ffa450edef68
nkeywal 2013-01-08 08:31:11 +00:00
parent 176ddb066c
commit 4e0af48fe4
20 changed files with 80 additions and 56 deletions

View File

@@ -36,6 +36,11 @@
<Package name="org.apache.hadoop.hbase.coprocessor.example.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.tmpl"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost"/>
<Or>

View File

@@ -19,7 +19,7 @@ MAVEN_OPTS="-Xmx3g"
# Please update the per-module test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=226
+OK_FINDBUGS_WARNINGS=127
# Allow two warnings. Javadoc complains about sun.misc.Unsafe use. See HBASE-7457
OK_JAVADOC_WARNINGS=2

View File

@@ -80,37 +80,37 @@ public class KeyValue implements Cell, HeapSize {
/**
* Comparator for plain key/values; i.e. non-catalog table key/values.
*/
-public static KVComparator COMPARATOR = new KVComparator();
+public static final KVComparator COMPARATOR = new KVComparator();
/**
* Comparator for plain key; i.e. non-catalog table key. Works on Key portion
* of KeyValue only.
*/
-public static KeyComparator KEY_COMPARATOR = new KeyComparator();
+public static final KeyComparator KEY_COMPARATOR = new KeyComparator();
/**
* A {@link KVComparator} for <code>.META.</code> catalog table
* {@link KeyValue}s.
*/
-public static KVComparator META_COMPARATOR = new MetaComparator();
+public static final KVComparator META_COMPARATOR = new MetaComparator();
/**
* A {@link KVComparator} for <code>.META.</code> catalog table
* {@link KeyValue} keys.
*/
-public static KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
+public static final KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
/**
* A {@link KVComparator} for <code>-ROOT-</code> catalog table
* {@link KeyValue}s.
*/
-public static KVComparator ROOT_COMPARATOR = new RootComparator();
+public static final KVComparator ROOT_COMPARATOR = new RootComparator();
/**
* A {@link KVComparator} for <code>-ROOT-</code> catalog table
* {@link KeyValue} keys.
*/
-public static KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
+public static final KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
/**
* Get the appropriate row comparator for the specified table.
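All six comparator changes above fix the same FindBugs detector, MS_SHOULD_BE_FINAL: a public static field that is never reassigned should be declared final, because otherwise any code on the classpath can silently swap the shared instance at runtime. A minimal sketch of the before/after shape (hypothetical class, not HBase code):

    import java.util.Comparator;

    public class ComparatorHolder {
        // Flagged (MS_SHOULD_BE_FINAL): any caller could reassign this
        // shared comparator at runtime.
        public static Comparator<String> ORDER = String.CASE_INSENSITIVE_ORDER;

        // Fixed: the reference can no longer be swapped out from under callers.
        public static final Comparator<String> STABLE_ORDER = String.CASE_INSENSITIVE_ORDER;
    }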

View File

@@ -34,7 +34,6 @@ import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell;
/**
* This class is a wrapper for the implementation of
@@ -127,6 +126,7 @@ public class JVM
ofdc = runUnixMXBeanMethod("getOpenFileDescriptorCount");
return (ofdc != null ? ofdc.longValue () : -1);
}
+InputStream in = null;
try {
//need to get the PID number of the process first
RuntimeMXBean rtmbean = ManagementFactory.getRuntimeMXBean();
@@ -137,7 +137,7 @@ public class JVM
Process p = Runtime.getRuntime().exec(
new String[] { "bash", "-c",
"ls /proc/" + pidhost[0] + "/fdinfo | wc -l" });
-InputStream in = p.getInputStream();
+in = p.getInputStream();
BufferedReader output = new BufferedReader(
new InputStreamReader(in));
@@ -146,6 +146,14 @@
return Long.parseLong(openFileDesCount);
} catch (IOException ie) {
LOG.warn("Not able to get the number of open file descriptors", ie);
+} finally {
+if (in != null){
+try {
+in.close();
+} catch (IOException e) {
+LOG.warn("Not able to close the InputStream", e);
+}
+}
}
return -1;
}
@@ -164,13 +172,14 @@ public class JVM
mfdc = runUnixMXBeanMethod("getMaxFileDescriptorCount");
return (mfdc != null ? mfdc.longValue () : -1);
}
+InputStream in = null;
try {
//using linux bash commands to retrieve info
Process p = Runtime.getRuntime().exec(
new String[] { "bash", "-c",
"ulimit -n" });
-InputStream in = p.getInputStream();
+in = p.getInputStream();
BufferedReader output = new BufferedReader(
new InputStreamReader(in));
@@ -179,6 +188,14 @@
return Long.parseLong(maxFileDesCount);
} catch (IOException ie) {
LOG.warn("Not able to get the max number of file descriptors", ie);
+} finally {
+if (in != null){
+try {
+in.close();
+} catch (IOException e) {
+LOG.warn("Not able to close the InputStream", e);
+}
+}
}
return -1;
}
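Both hunks in this file apply the same stream-hygiene fix: the InputStream is declared before the try block and closed in a finally clause, with the close itself guarded so a failed close only logs a warning instead of masking the original error. A standalone sketch of the pattern (hypothetical class and method names; try-with-resources was not an option, as HBase still targeted Java 6 at the time):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;

    public class StreamCloseSketch {
        static long readSingleNumber(Process p) {
            InputStream in = null;                 // declared outside so finally can reach it
            try {
                in = p.getInputStream();
                BufferedReader output = new BufferedReader(new InputStreamReader(in));
                String line = output.readLine();
                return line == null ? -1 : Long.parseLong(line.trim());
            } catch (IOException ie) {
                return -1;                         // mirrors the -1 error convention above
            } finally {
                if (in != null) {
                    try {
                        in.close();                // always release the file descriptor
                    } catch (IOException e) {
                        // a failed close is worth a warning at most, not a new failure
                    }
                }
            }
        }
    }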

View File

@@ -63,8 +63,8 @@ import org.apache.zookeeper.ZooKeeper;
* listeners registered with ZooKeeperWatcher cannot be removed.
*/
public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
-public static String node = "/backup/example/lastbackup";
-public static String zkkey = "ZK";
+public static final String node = "/backup/example/lastbackup";
+public static final String zkkey = "ZK";
private static final Log LOG = LogFactory.getLog(ZooKeeperScanPolicyObserver.class);
/**
@@ -93,6 +93,7 @@ public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {
*
* @return the last know version of the data
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION")
public byte[] getData() {
// try at most twice/minute
if (needSetup && EnvironmentEdgeManager.currentTimeMillis() > lastSetupTry + 30000) {
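Here the broad catch is intentional, so the commit suppresses the detector at the method level instead of restructuring the code. REC_CATCH_EXCEPTION fires when catch (Exception e) wraps code that throws no checked exception, because such a catch silently swallows unexpected RuntimeExceptions too. A sketch of a legitimate suppression (hypothetical class; the justification attribute is optional but worth filling in):

    import edu.umd.cs.findbugs.annotations.SuppressWarnings;

    public class RetrySketch {
        // Intentional: every failure, checked or not, means the same thing
        // here ("try again later"), so the broad catch is deliberate.
        @SuppressWarnings(value = "REC_CATCH_EXCEPTION",
            justification = "any setup failure is handled by retrying later")
        byte[] getDataOrNull() {
            try {
                return setup();             // only RuntimeExceptions possible
            } catch (Exception e) {         // flagged without the annotation
                return null;
            }
        }

        private byte[] setup() { return new byte[0]; }
    }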

View File

@@ -127,7 +127,7 @@ public class DemoClient {
// Create the demo table with two column families, entry: and unused:
//
ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
-ColumnDescriptor col = null;
+ColumnDescriptor col;
col = new ColumnDescriptor();
col.name = ByteBuffer.wrap(bytes("entry:"));
col.maxVersions = 10;
@@ -227,7 +227,7 @@ public class DemoClient {
client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));
-Mutation m = null;
+Mutation m;
mutations = new ArrayList<Mutation>();
m = new Mutation();
m.column = ByteBuffer.wrap(bytes("entry:foo"));
@@ -272,7 +272,7 @@ public class DemoClient {
}
List<TCell> result = client.get(ByteBuffer.wrap(t), ByteBuffer.wrap(row), ByteBuffer.wrap(bytes("entry:foo")), dummyAttributes);
-if (result.isEmpty() == false) {
+if (!result.isEmpty()) {
System.out.println("FATAL: shouldn't get here");
System.exit(-1);
}
@@ -305,7 +305,7 @@ public class DemoClient {
transport.close();
}
-private final void printVersions(ByteBuffer row, List<TCell> versions) {
+private void printVersions(ByteBuffer row, List<TCell> versions) {
StringBuilder rowStr = new StringBuilder();
for (TCell cell : versions) {
rowStr.append(utf8(cell.value.array()));
@@ -314,7 +314,7 @@ public class DemoClient {
System.out.println("row: " + utf8(row.array()) + ", values: " + rowStr);
}
-private final void printRow(TRowResult rowResult) {
+private void printRow(TRowResult rowResult) {
// copy values into a TreeMap to get them in sorted order
TreeMap<String, TCell> sorted = new TreeMap<String, TCell>();
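The DemoClient edits are all small cleanups of the same flavor: dropping dead "= null" initializers (the null is overwritten before it is ever read), writing !x instead of x == false, and removing final from private methods, where it is redundant because private methods cannot be overridden anyway. For example (a sketch, not the Thrift client itself):

    public class CleanupSketch {
        public static void main(String[] args) {
            // Before: StringBuilder sb = null; sb = new StringBuilder(...);
            // The initial null is a dead store that FindBugs flags.
            StringBuilder sb;
            sb = new StringBuilder("entry:");

            if (!sb.toString().isEmpty()) {   // clearer than "== false"
                System.out.println(sb);
            }
        }
    }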

View File

@@ -22,9 +22,6 @@ package org.apache.hadoop.hbase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus;
import org.apache.hadoop.util.StringUtils;
@@ -51,7 +48,7 @@ import org.apache.hadoop.util.StringUtils;
healthChecker.init(healthCheckScript, scriptTimeout);
this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD,
HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD);
-this.failureWindow = this.threshold * sleepTime;
+this.failureWindow = (long)this.threshold * (long)sleepTime;
}
@Override
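This is the one real behavioral fix in the commit. threshold * sleepTime is evaluated as a 32-bit int multiply and only then widened to long for the assignment, so a large configured threshold or interval could silently overflow (FindBugs ICAST_INTEGER_MULTIPLY_CAST_TO_LONG). Casting an operand first forces 64-bit arithmetic. A self-contained illustration:

    public class OverflowSketch {
        public static void main(String[] args) {
            int threshold = 3;
            int sleepTime = 1000000000;                  // a large interval in millis

            long wrong = threshold * sleepTime;          // int multiply overflows first
            long right = (long) threshold * sleepTime;   // widened before multiplying

            System.out.println(wrong);                   // -1294967296
            System.out.println(right);                   // 3000000000
        }
    }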

View File

@@ -71,7 +71,7 @@ public class ServerName implements Comparable<ServerName> {
*/
public static final String SERVERNAME_SEPARATOR = ",";
-public static Pattern SERVERNAME_PATTERN =
+public static final Pattern SERVERNAME_PATTERN =
Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");

View File

@@ -35,7 +35,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
private Map<String, byte[]> attributes;
// used for uniquely identifying an operation
-static public String ID_ATRIBUTE = "_operation.attributes.id";
+public static final String ID_ATRIBUTE = "_operation.attributes.id";
public void setAttribute(String name, byte[] value) {
if (attributes == null && value == null) {

View File

@@ -95,9 +95,9 @@ public class Scan extends OperationWithAttributes {
// If application wants to collect scan metrics, it needs to
// call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE))
-static public String SCAN_ATTRIBUTES_METRICS_ENABLE =
+static public final String SCAN_ATTRIBUTES_METRICS_ENABLE =
"scan.attributes.metrics.enable";
-static public String SCAN_ATTRIBUTES_METRICS_DATA =
+static public final String SCAN_ATTRIBUTES_METRICS_DATA =
"scan.attributes.metrics.data";
/*

View File

@@ -67,7 +67,7 @@ import com.google.protobuf.Service;
@InterfaceStability.Evolving
public class AggregateImplementation<T, S, P extends Message, Q extends Message, R extends Message>
extends AggregateService implements CoprocessorService, Coprocessor {
-protected static Log log = LogFactory.getLog(AggregateImplementation.class);
+protected static final Log log = LogFactory.getLog(AggregateImplementation.class);
private RegionCoprocessorEnvironment env;
/**

View File

@@ -67,7 +67,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
protected Server server;
// sequence id generator for default FIFO ordering of events
-protected static AtomicLong seqids = new AtomicLong(0);
+protected static final AtomicLong seqids = new AtomicLong(0);
// sequence id for this event
private final long seqid;

View File

@@ -532,6 +532,16 @@ public class LruBlockCache implements BlockCache, HeapSize {
if(this.overflow() == that.overflow()) return 0;
return this.overflow() > that.overflow() ? 1 : -1;
}
+@Override
+public boolean equals(Object that) {
+if (that == null || !(that instanceof BlockBucket)){
+return false;
+}
+return compareTo((BlockBucket)that) == 0;
+}
}
/**
@@ -625,13 +635,13 @@ public class LruBlockCache implements BlockCache, HeapSize {
public void evict() {
synchronized(this) {
-this.notify(); // FindBugs NN_NAKED_NOTIFY
+this.notifyAll(); // FindBugs NN_NAKED_NOTIFY
}
}
-void shutdown() {
+synchronized void shutdown() {
this.go = false;
interrupt();
this.notifyAll();
}
/**
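The added equals answers EQ_COMPARETO_USE_OBJECT_EQUALS: BlockBucket defined compareTo but inherited Object.equals, so ordered and hash-based collections would disagree about equality. (The full contract also asks for a hashCode consistent with equals, which this hunk does not add; note too that instanceof is already false for null, so the explicit null check is belt-and-braces.) The second hunk upgrades a naked notify() to notifyAll() and makes shutdown() synchronized, so its notifyAll() runs while holding the monitor it notifies on. A minimal sketch of the consistent compareTo/equals/hashCode trio (hypothetical class):

    public class Bucket implements Comparable<Bucket> {
        private final long overflow;

        Bucket(long overflow) { this.overflow = overflow; }

        @Override
        public int compareTo(Bucket that) {
            return this.overflow > that.overflow ? 1
                 : this.overflow < that.overflow ? -1 : 0;
        }

        @Override
        public boolean equals(Object that) {
            return that instanceof Bucket && compareTo((Bucket) that) == 0;
        }

        @Override
        public int hashCode() {
            return (int) (overflow ^ (overflow >>> 32));  // same field as equals
        }
    }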

View File

@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
@@ -49,10 +48,10 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
*/
@InterfaceAudience.Public
public class HLogInputFormat extends InputFormat<HLogKey, WALEdit> {
-private static Log LOG = LogFactory.getLog(HLogInputFormat.class);
+private static final Log LOG = LogFactory.getLog(HLogInputFormat.class);
-public static String START_TIME_KEY = "hlog.start.time";
-public static String END_TIME_KEY = "hlog.end.time";
+public static final String START_TIME_KEY = "hlog.start.time";
+public static final String END_TIME_KEY = "hlog.end.time";
/**
* {@link InputSplit} for {@link HLog} files. Each split represent

View File

@@ -91,13 +91,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LoadIncrementalHFiles extends Configured implements Tool {
-private static Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
-static AtomicLong regionCount = new AtomicLong(0);
+private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
+static final AtomicLong regionCount = new AtomicLong(0);
private HBaseAdmin hbAdmin;
private Configuration cfg;
-public static String NAME = "completebulkload";
-private static String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers";
+public static final String NAME = "completebulkload";
+private static final String ASSIGN_SEQ_IDS = "hbase.mapreduce.bulkload.assign.sequenceNumbers";
private boolean assignSeqIds;
public LoadIncrementalHFiles(Configuration conf) throws Exception {
@@ -626,11 +626,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
}
HTableDescriptor htd = new HTableDescriptor(tableName);
-HColumnDescriptor hcd = null;
+HColumnDescriptor hcd;
// Add column families
// Build a set of keys
-byte[][] keys = null;
+byte[][] keys;
TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
for (FileStatus stat : familyDirStatuses) {
@@ -667,10 +667,10 @@
" last=" + Bytes.toStringBinary(last));
// To eventually infer start key-end key boundaries
-Integer value = map.containsKey(first)?(Integer)map.get(first):0;
+Integer value = map.containsKey(first)? map.get(first):0;
map.put(first, value+1);
-value = map.containsKey(last)?(Integer)map.get(last):0;
+value = map.containsKey(last)? map.get(last):0;
map.put(last, value-1);
} finally {
reader.close();
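The removed (Integer) casts were redundant: map is a TreeMap<byte[], Integer>, so get() is already typed as Integer, and the ternary unboxes it for the arithmetic that follows. A sketch:

    import java.util.TreeMap;

    public class CastSketch {
        public static void main(String[] args) {
            TreeMap<String, Integer> map = new TreeMap<String, Integer>();
            // Before: Integer value = map.containsKey("k") ? (Integer) map.get("k") : 0;
            // The cast adds nothing: get() already returns Integer.
            Integer value = map.containsKey("k") ? map.get("k") : 0;
            map.put("k", value + 1);
            System.out.println(map.get("k"));   // 1
        }
    }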

View File

@@ -46,6 +46,7 @@ public class SplitRegionHandler extends EventHandler implements TotesHRegionInfo
/**
* For testing only! Set to true to skip handling of split.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
public static boolean TEST_SKIP = false;
public SplitRegionHandler(Server server,
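Unlike the other MS_SHOULD_BE_FINAL fixes in this commit, TEST_SKIP must stay mutable because tests flip it at runtime, so the warning is suppressed rather than "fixed". A sketch of the same trade-off (hypothetical class; the justification attribute is optional but worth filling in):

    import edu.umd.cs.findbugs.annotations.SuppressWarnings;

    public class TestHookSketch {
        // Deliberately non-final: unit tests mutate this to short-circuit the handler.
        @SuppressWarnings(value = "MS_SHOULD_BE_FINAL",
            justification = "toggled by tests only, never in production")
        public static boolean TEST_SKIP = false;
    }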

View File

@@ -34,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSplitPolicy {
private static final Log LOG = LogFactory
.getLog(KeyPrefixRegionSplitPolicy.class);
-public static String PREFIX_LENGTH_KEY = "prefix_split_key_policy.prefix_length";
+public static final String PREFIX_LENGTH_KEY = "prefix_split_key_policy.prefix_length";
private int prefixLength = 0;

View File

@@ -49,8 +49,8 @@ public class Permission extends VersionedWritable {
public byte code() { return code; }
}
-private static Log LOG = LogFactory.getLog(Permission.class);
-protected static Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
+private static final Log LOG = LogFactory.getLog(Permission.class);
+protected static final Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
protected Action[] actions;

View File

@@ -27,7 +27,7 @@ import org.apache.zookeeper.WatchedEvent;
@InterfaceAudience.Private
public class EmptyWatcher implements Watcher {
// Used in this package but also by tests so needs to be public
-public static EmptyWatcher instance = new EmptyWatcher();
+public static final EmptyWatcher instance = new EmptyWatcher();
private EmptyWatcher() {}
public void process(WatchedEvent event) {}

pom.xml
View File

@@ -883,7 +883,7 @@
<maven.antrun.version>1.6</maven.antrun.version>
<jamon.plugin.version>2.3.4</jamon.plugin.version>
<findbugs-maven-plugin.version>2.5.2</findbugs-maven-plugin.version>
-<findbugs.version>2.0.1</findbugs.version> <!-- as the plugin version for safety -->
+<findbugs-annotations>1.3.9-1</findbugs-annotations>
<maven.site.version>3.1</maven.site.version>
<javadoc.version>2.9</javadoc.version>
<maven.resources.plugin.version>2.5</maven.resources.plugin.version>
@@ -1263,15 +1263,9 @@
<dependencies>
<dependency>
-<groupId>com.google.code.findbugs</groupId>
-<artifactId>annotations</artifactId>
-<version>${findbugs.version}</version>
-<scope>compile</scope>
-</dependency>
-<dependency>
-<groupId>com.google.code.findbugs</groupId>
-<artifactId>jsr305</artifactId>
-<version>${findbugs.version}</version>
+<groupId>com.github.stephenc.findbugs</groupId>
+<artifactId>findbugs-annotations</artifactId>
+<version>${findbugs-annotations}</version>
<scope>compile</scope>
</dependency>
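The two com.google.code.findbugs artifacts are collapsed into a single annotations jar. Presumably the motivation is licensing: com.github.stephenc.findbugs:findbugs-annotations is an Apache-licensed re-implementation of the (LGPL) FindBugs annotations, which matters for a compile-scoped dependency in an Apache project. Either artifact supplies the same package, so the suppressions added elsewhere in this commit keep compiling unchanged:

    // Same package whichever artifact provides it on the classpath:
    import edu.umd.cs.findbugs.annotations.SuppressWarnings;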