HBASE-5598 Analyse and fix the findbugs reporting by QA and add invalid bugs into findbugs-excludeFilter file

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1425351 13f79535-47bb-0310-9956-ffa450edef68
nkeywal 2012-12-22 22:05:30 +00:00
parent dceffb1bda
commit 9fac4877d3
6 changed files with 234 additions and 165 deletions


@@ -16,102 +16,186 @@
-->
<FindBugsFilter>
<Match>
<Package name="org.apache.hadoop.hbase.thrift2.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.thrift2.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.thrift.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.thrift.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.rest.protobuf.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.rest.protobuf.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.protobuf.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.coprocessor.example.generated"/>
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.protobuf.generated"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost" />
<Or>
<Method name="preExists" />
<Method name="preCheckAndPut" />
<Method name="preCheckAndDelete" />
<Method name="preScannerNext" />
</Or>
<Bug pattern="NP_BOOLEAN_RETURN_NULL" />
</Match>
<Match>
<Package name="org.apache.hadoop.hbase.coprocessor.example.generated"/>
</Match>
<!-- This is read by a thread from hadoop and findbugs never finds it -->
<Match>
<Bug code="UrF"/>
<Class name="org.apache.hadoop.hbase.metrics.BaseSourceImpl"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost"/>
<Or>
<Method name="preExists"/>
<Method name="preCheckAndPut"/>
<Method name="preCheckAndDelete"/>
<Method name="preScannerNext"/>
</Or>
<Bug pattern="NP_BOOLEAN_RETURN_NULL"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.StoreFile$Writer" />
<Bug pattern="NP_NULL_PARAM_DEREF" />
</Match>
<!-- This is read by a thread from hadoop and findbugs never finds it -->
<Match>
<Bug code="UrF"/>
<Class name="org.apache.hadoop.hbase.metrics.BaseSourceImpl"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader"/>
<Or>
<Method name="addFileInfoToException" />
</Or>
<Bug pattern="REC_CATCH_EXCEPTION" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.StoreFile$Writer"/>
<Bug pattern="NP_NULL_PARAM_DEREF"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.KeyValue"/>
<Or>
<Method name="createEmptyByteArray" />
<Method name="createByteArray" />
</Or>
<Bug pattern="INT_VACUOUS_COMPARISON" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader"/>
<Or>
<Method name="addFileInfoToException"/>
</Or>
<Bug pattern="REC_CATCH_EXCEPTION"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.LruHashMap"/>
<Or>
<Method name="equals" />
</Or>
<Bug pattern="EQ_UNUSUAL" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.util.ByteBufferUtils"/>
<Or>
<Method name="putInt" />
</Or>
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper"/>
<Or>
<Method name="MapRunner" />
</Or>
<Bug pattern="REC_CATCH_EXCEPTION" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.KeyValue"/>
<Or>
<Method name="createEmptyByteArray"/>
<Method name="createByteArray"/>
</Or>
<Bug pattern="INT_VACUOUS_COMPARISON"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.LruHashMap"/>
<Or>
<Method name="equals"/>
</Or>
<Bug pattern="EQ_UNUSUAL"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.util.ByteBufferUtils"/>
<Or>
<Method name="putInt"/>
</Or>
<Bug pattern="ICAST_QUESTIONABLE_UNSIGNED_RIGHT_SHIFT"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper"/>
<Or>
<Method name="MapRunner"/>
</Or>
<Bug pattern="REC_CATCH_EXCEPTION"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.util.PoolMap$RoundRobinPool"/>
<Bug pattern="EQ_DOESNT_OVERRIDE_EQUALS"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.HRegion"/>
<Or>
<Method name="startRegionOperation"/>
<Method name="startBulkRegionOperation"/>
</Or>
<Bug pattern="UL_UNRELEASED_LOCK"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.filter.ParseConstants"/>
<!--
A mutable static field could be changed by malicious code or by accident. The field could be
made package protected to avoid this vulnerability.
We have a number of constants like:
public static final byte [] SKIP_ARRAY = new byte [ ] {'S', 'K', 'I', 'P'};
The warning is not wrong, but it is difficult to avoid...
!-->
<Bug pattern="PKGPROTECT"/>
</Match>
<Match>
<!--
Returning a reference to a mutable object value stored in one of the object's fields exposes
the internal representation of the object. If instances are accessed by untrusted code,
and unchecked changes to the mutable object would compromise security or other important
properties, you will need to do something different. Returning a new copy of the object is
better approach in many situations.
We have getters on our internal fields. Questionable, but out of findbugs scope. Returning a
copy is not practical in most cases.
!-->
<Bug pattern="EI_EXPOSE_REP"/>
</Match>
<Match>
<Bug pattern="EI_EXPOSE_REP2"/>
</Match>
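As an aside, the EI_EXPOSE_REP / EI_EXPOSE_REP2 trade-off described above boils down to code of roughly the following shape. This is a hypothetical, self-contained sketch (the class and field names are illustrative, not HBase code); the defensive-copy getter is the alternative that the comment above notes is not practical in most cases:

public class KeyHolder {
  private final byte[] key;

  public KeyHolder(byte[] key) {
    this.key = key;              // EI_EXPOSE_REP2: keeps a reference to the caller's array
  }

  public byte[] getKey() {
    return key;                  // EI_EXPOSE_REP: callers can mutate the internal array
  }

  public byte[] getKeyCopy() {
    return key.clone();          // defensive copy: safe, but costs an allocation on every call
  }
}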
<Match>
<!--
This class implements the Comparator interface. You should consider whether or not it should
also implement the Serializable interface. If a comparator is used to construct an ordered
collection such as a TreeMap, then the TreeMap will be serializable only if the comparator
is also serializable. As most comparators have little or no state, making them serializable
is generally easy and good defensive programming.
!-->
<Bug pattern="SE_COMPARATOR_SHOULD_BE_SERIALIZABLE"/>
</Match>
<Match>
<!--
This method performs synchronization on an object that is an instance of a class from
the java.util.concurrent package (or its subclasses). Instances of these classes have their own
concurrency control mechanisms that are orthogonal to the synchronization provided by the Java
keyword synchronized. For example, synchronizing on an AtomicBoolean will not prevent other
threads from modifying the AtomicBoolean.
Such code may be correct, but should be carefully reviewed and documented, and may confuse people
who have to maintain the code at a later date.
We do that all the time to save lock objects.
!-->
<Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER"/>
</Match>
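The idiom behind the JLM_JSR166_UTILCONCURRENT_MONITORENTER exclusion, sketched as a standalone hypothetical example (not HBase code): the concurrent object doubles as the monitor and as a wait/notify condition, so no separate lock object is allocated, which is exactly what the detector flags.

import java.util.concurrent.atomic.AtomicBoolean;

public class FlushGate {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  public void startFlush() {
    flushing.set(true);
  }

  public void finishFlush() {
    synchronized (flushing) {      // flagged: synchronizing on a java.util.concurrent instance
      flushing.set(false);
      flushing.notifyAll();        // the same object is reused as the condition to signal
    }
  }

  public void awaitFlush() throws InterruptedException {
    synchronized (flushing) {
      while (flushing.get()) {
        flushing.wait();
      }
    }
  }
}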
<Match>
<!--
Found a call to a method which will perform a byte to String (or String to byte) conversion,
and will assume that the default platform encoding is suitable. This will cause the
application behaviour to vary between platforms. Use an alternative API and specify a
charset name or Charset object explicitly.
!-->
<Bug pattern="DM_DEFAULT_ENCODING"/>
</Match>
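A minimal sketch of the conversion DM_DEFAULT_ENCODING complains about, next to the explicit-charset form it asks for (illustrative code, not taken from HBase):

import java.nio.charset.Charset;

public class EncodingExample {
  public static void main(String[] args) {
    byte[] platformDependent = "row-1".getBytes();                       // uses the platform default charset; flagged
    byte[] portable = "row-1".getBytes(Charset.forName("UTF-8"));        // identical bytes on every platform
    System.out.println(new String(portable, Charset.forName("UTF-8")));  // decode with the same explicit charset
  }
}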
<Match>
<!--
Invoking System.exit shuts down the entire Java virtual machine. This should only be
done when it is appropriate. Such calls make it hard or impossible for your code to be
invoked by other code. Consider throwing a RuntimeException instead.
It is blatant enough that code reviews catch the genuinely wrong cases.
!-->
<Bug pattern="DM_EXIT"/>
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.util.PoolMap$RoundRobinPool"/>
<Bug pattern="EQ_DOESNT_OVERRIDE_EQUALS" />
</Match>
<Match>
<Class name="org.apache.hadoop.hbase.regionserver.HRegion" />
<Or>
<Method name="startRegionOperation" />
<Method name="startBulkRegionOperation" />
</Or>
<Bug pattern="UL_UNRELEASED_LOCK" />
</Match>
</FindBugsFilter>
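For context on the NP_BOOLEAN_RETURN_NULL entries in the filter above: the RegionCoprocessorHost hooks use a boxed Boolean as a three-valued result, where null means "no coprocessor decided the outcome, fall through to the default region logic", so the warning is noise for those methods. A hedged, self-contained sketch of that shape (the class and parameters are illustrative, not the actual HBase signatures):

public class HookResultExample {
  /** TRUE or FALSE if an observer decided the outcome; null to fall back to the default logic. */
  public Boolean preExists(boolean handledByObserver, boolean observerResult) {
    if (!handledByObserver) {
      return null;                          // NP_BOOLEAN_RETURN_NULL fires here, but null is a deliberate third state
    }
    return Boolean.valueOf(observerResult);
  }
}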


@@ -19,5 +19,5 @@ MAVEN_OPTS="-Xmx3g"
# Please update the per-module test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=84
OK_FINDBUGS_WARNINGS=517
OK_FINDBUGS_WARNINGS=226
OK_JAVADOC_WARNINGS=0


@@ -29,73 +29,63 @@ import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class SplitLogCounters {
//SplitLogManager counters
public static AtomicLong tot_mgr_log_split_batch_start = new AtomicLong(0);
public static AtomicLong tot_mgr_log_split_batch_success =
new AtomicLong(0);
public static AtomicLong tot_mgr_log_split_batch_err = new AtomicLong(0);
public static AtomicLong tot_mgr_new_unexpected_hlogs = new AtomicLong(0);
public static AtomicLong tot_mgr_log_split_start = new AtomicLong(0);
public static AtomicLong tot_mgr_log_split_success = new AtomicLong(0);
public static AtomicLong tot_mgr_log_split_err = new AtomicLong(0);
public static AtomicLong tot_mgr_node_create_queued = new AtomicLong(0);
public static AtomicLong tot_mgr_node_create_result = new AtomicLong(0);
public static AtomicLong tot_mgr_node_already_exists = new AtomicLong(0);
public static AtomicLong tot_mgr_node_create_err = new AtomicLong(0);
public static AtomicLong tot_mgr_node_create_retry = new AtomicLong(0);
public static AtomicLong tot_mgr_get_data_queued = new AtomicLong(0);
public static AtomicLong tot_mgr_get_data_result = new AtomicLong(0);
public static AtomicLong tot_mgr_get_data_nonode = new AtomicLong(0);
public static AtomicLong tot_mgr_get_data_err = new AtomicLong(0);
public static AtomicLong tot_mgr_get_data_retry = new AtomicLong(0);
public static AtomicLong tot_mgr_node_delete_queued = new AtomicLong(0);
public static AtomicLong tot_mgr_node_delete_result = new AtomicLong(0);
public static AtomicLong tot_mgr_node_delete_err = new AtomicLong(0);
public static AtomicLong tot_mgr_resubmit = new AtomicLong(0);
public static AtomicLong tot_mgr_resubmit_failed = new AtomicLong(0);
public static AtomicLong tot_mgr_null_data = new AtomicLong(0);
public static AtomicLong tot_mgr_orphan_task_acquired = new AtomicLong(0);
public static AtomicLong tot_mgr_wait_for_zk_delete = new AtomicLong(0);
public static AtomicLong tot_mgr_unacquired_orphan_done = new AtomicLong(0);
public static AtomicLong tot_mgr_resubmit_threshold_reached =
new AtomicLong(0);
public static AtomicLong tot_mgr_missing_state_in_delete =
new AtomicLong(0);
public static AtomicLong tot_mgr_heartbeat = new AtomicLong(0);
public static AtomicLong tot_mgr_rescan = new AtomicLong(0);
public static AtomicLong tot_mgr_rescan_deleted = new AtomicLong(0);
public static AtomicLong tot_mgr_task_deleted = new AtomicLong(0);
public static AtomicLong tot_mgr_resubmit_unassigned = new AtomicLong(0);
public static AtomicLong tot_mgr_relist_logdir = new AtomicLong(0);
public static AtomicLong tot_mgr_resubmit_dead_server_task =
new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_batch_start = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_batch_success = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_batch_err = new AtomicLong(0);
public final static AtomicLong tot_mgr_new_unexpected_hlogs = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_start = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_success = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_err = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_create_queued = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_create_result = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_already_exists = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_create_err = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_create_retry = new AtomicLong(0);
public final static AtomicLong tot_mgr_get_data_queued = new AtomicLong(0);
public final static AtomicLong tot_mgr_get_data_result = new AtomicLong(0);
public final static AtomicLong tot_mgr_get_data_nonode = new AtomicLong(0);
public final static AtomicLong tot_mgr_get_data_err = new AtomicLong(0);
public final static AtomicLong tot_mgr_get_data_retry = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_delete_queued = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_delete_result = new AtomicLong(0);
public final static AtomicLong tot_mgr_node_delete_err = new AtomicLong(0);
public final static AtomicLong tot_mgr_resubmit = new AtomicLong(0);
public final static AtomicLong tot_mgr_resubmit_failed = new AtomicLong(0);
public final static AtomicLong tot_mgr_null_data = new AtomicLong(0);
public final static AtomicLong tot_mgr_orphan_task_acquired = new AtomicLong(0);
public final static AtomicLong tot_mgr_wait_for_zk_delete = new AtomicLong(0);
public final static AtomicLong tot_mgr_unacquired_orphan_done = new AtomicLong(0);
public final static AtomicLong tot_mgr_resubmit_threshold_reached = new AtomicLong(0);
public final static AtomicLong tot_mgr_missing_state_in_delete = new AtomicLong(0);
public final static AtomicLong tot_mgr_heartbeat = new AtomicLong(0);
public final static AtomicLong tot_mgr_rescan = new AtomicLong(0);
public final static AtomicLong tot_mgr_rescan_deleted = new AtomicLong(0);
public final static AtomicLong tot_mgr_task_deleted = new AtomicLong(0);
public final static AtomicLong tot_mgr_resubmit_unassigned = new AtomicLong(0);
public final static AtomicLong tot_mgr_relist_logdir = new AtomicLong(0);
public final static AtomicLong tot_mgr_resubmit_dead_server_task = new AtomicLong(0);
// SplitLogWorker counters
public static AtomicLong tot_wkr_failed_to_grab_task_no_data =
new AtomicLong(0);
public static AtomicLong tot_wkr_failed_to_grab_task_exception =
new AtomicLong(0);
public static AtomicLong tot_wkr_failed_to_grab_task_owned =
new AtomicLong(0);
public static AtomicLong tot_wkr_failed_to_grab_task_lost_race =
new AtomicLong(0);
public static AtomicLong tot_wkr_task_acquired = new AtomicLong(0);
public static AtomicLong tot_wkr_task_resigned = new AtomicLong(0);
public static AtomicLong tot_wkr_task_done = new AtomicLong(0);
public static AtomicLong tot_wkr_task_err = new AtomicLong(0);
public static AtomicLong tot_wkr_task_heartbeat = new AtomicLong(0);
public static AtomicLong tot_wkr_task_acquired_rescan = new AtomicLong(0);
public static AtomicLong tot_wkr_get_data_queued = new AtomicLong(0);
public static AtomicLong tot_wkr_get_data_result = new AtomicLong(0);
public static AtomicLong tot_wkr_get_data_retry = new AtomicLong(0);
public static AtomicLong tot_wkr_preempt_task = new AtomicLong(0);
public static AtomicLong tot_wkr_task_heartbeat_failed = new AtomicLong(0);
public static AtomicLong tot_wkr_final_transistion_failed =
new AtomicLong(0);
public final static AtomicLong tot_wkr_failed_to_grab_task_no_data = new AtomicLong(0);
public final static AtomicLong tot_wkr_failed_to_grab_task_exception = new AtomicLong(0);
public final static AtomicLong tot_wkr_failed_to_grab_task_owned = new AtomicLong(0);
public final static AtomicLong tot_wkr_failed_to_grab_task_lost_race = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_acquired = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_resigned = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_done = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_err = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_heartbeat = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_acquired_rescan = new AtomicLong(0);
public final static AtomicLong tot_wkr_get_data_queued = new AtomicLong(0);
public final static AtomicLong tot_wkr_get_data_result = new AtomicLong(0);
public final static AtomicLong tot_wkr_get_data_retry = new AtomicLong(0);
public final static AtomicLong tot_wkr_preempt_task = new AtomicLong(0);
public final static AtomicLong tot_wkr_task_heartbeat_failed = new AtomicLong(0);
public final static AtomicLong tot_wkr_final_transition_failed = new AtomicLong(0);
public static void resetCounters() throws Exception {
Class<?> cl = (new SplitLogCounters()).getClass();
Field[] flds = cl.getDeclaredFields();
for (Field fld : flds) {
for (Field fld : cl.getDeclaredFields()) {
((AtomicLong)fld.get(null)).set(0);
}
}
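The SplitLogCounters change above turns every counter into a final reference to a mutable AtomicLong, which addresses FindBugs' mutable-public-static-field warnings (e.g. MS_SHOULD_BE_FINAL) while keeping the values updatable, and resetCounters() can zero them all through reflection because every declared field is an AtomicLong. A self-contained sketch of the same pattern (class and counter names invented for illustration, not HBase code):

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;

public class CounterExample {
  public static final AtomicLong tasksDone = new AtomicLong(0);
  public static final AtomicLong tasksFailed = new AtomicLong(0);

  public static void resetCounters() throws IllegalAccessException {
    for (Field field : CounterExample.class.getDeclaredFields()) {
      ((AtomicLong) field.get(null)).set(0);   // valid only while every declared field is a static AtomicLong
    }
  }

  public static void main(String[] args) throws IllegalAccessException {
    tasksDone.incrementAndGet();
    resetCounters();
    System.out.println(tasksDone.get());       // prints 0
  }
}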


@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
@@ -80,7 +79,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
private volatile String currentTask = null;
private int currentVersion;
private volatile boolean exitWorker;
private Object grabTaskLock = new Object();
private final Object grabTaskLock = new Object();
private boolean workerInGrabTask = false;
@@ -109,8 +108,8 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
// interrupted or has encountered a transient error and when it has
// encountered a bad non-retry-able persistent error.
try {
if (HLogSplitter.splitLogFile(rootdir,
fs.getFileStatus(new Path(filename)), fs, conf, p, sequenceIdChecker) == false) {
if (!HLogSplitter.splitLogFile(rootdir,
fs.getFileStatus(new Path(filename)), fs, conf, p, sequenceIdChecker)) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {
@@ -249,13 +248,13 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
return;
}
if (slt.isUnassigned() == false) {
if (!slt.isUnassigned()) {
SplitLogCounters.tot_wkr_failed_to_grab_task_owned.incrementAndGet();
return;
}
currentVersion = stat.getVersion();
if (attemptToOwnTask(true) == false) {
if (!attemptToOwnTask(true)) {
SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
return;
}
@@ -277,7 +276,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
@Override
public boolean progress() {
if (attemptToOwnTask(false) == false) {
if (!attemptToOwnTask(false)) {
LOG.warn("Failed to heartbeat the task" + currentTask);
return false;
}
@@ -323,7 +322,6 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
Thread.interrupted();
}
}
return;
}
/**
@@ -395,8 +393,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
} catch (KeeperException e) {
LOG.warn("failed to end task, " + path + " " + slt, e);
}
SplitLogCounters.tot_wkr_final_transistion_failed.incrementAndGet();
return;
SplitLogCounters.tot_wkr_final_transition_failed.incrementAndGet();
}
void getDataSetWatchAsync() {
@@ -531,7 +528,6 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
worker = new Thread(null, this, "SplitLogWorker-" + serverName);
exitWorker = false;
worker.start();
return;
}
/**
@@ -558,7 +554,6 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
}
data = watcher.getRecoverableZooKeeper().removeMetaData(data);
getDataSetWatchSuccess(path, data);
return;
}
}
@@ -574,7 +569,7 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
DONE(),
ERR(),
RESIGNED(),
PREEMPTED();
PREEMPTED()
}
public Status exec(String name, CancelableProgressable p);
}


@@ -212,13 +212,13 @@ public class TestDistributedLogSplitting {
long endt = curt + waitTime;
while (curt < endt) {
if ((tot_wkr_task_resigned.get() + tot_wkr_task_err.get() +
tot_wkr_final_transistion_failed.get() + tot_wkr_task_done.get() +
tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() +
tot_wkr_preempt_task.get()) == 0) {
Thread.yield();
curt = System.currentTimeMillis();
} else {
assertEquals(1, (tot_wkr_task_resigned.get() + tot_wkr_task_err.get() +
tot_wkr_final_transistion_failed.get() + tot_wkr_task_done.get() +
tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() +
tot_wkr_preempt_task.get()));
return;
}
@@ -226,7 +226,7 @@ public class TestDistributedLogSplitting {
fail("none of the following counters went up in " + waitTime +
" milliseconds - " +
"tot_wkr_task_resigned, tot_wkr_task_err, " +
"tot_wkr_final_transistion_failed, tot_wkr_task_done, " +
"tot_wkr_final_transition_failed, tot_wkr_task_done, " +
"tot_wkr_preempt_task");
}


@@ -865,7 +865,7 @@
<maven.assembly.version>2.3</maven.assembly.version>
<maven.antrun.version>1.6</maven.antrun.version>
<jamon.plugin.version>2.3.4</jamon.plugin.version>
<findbugs.version>2.4.0</findbugs.version>
<findbugs.version>2.5.2</findbugs.version>
<maven.site.version>3.1</maven.site.version>
<javadoc.version>2.9</javadoc.version>
<maven.resources.plugin.version>2.5</maven.resources.plugin.version>