HBASE-18308 Eliminate the findbugs warnings for hbase-server

Chia-Ping Tsai 2017-07-20 00:35:35 +08:00
parent 19154c8ea8
commit 594f892c02
9 changed files with 15 additions and 27 deletions


@@ -309,14 +309,12 @@ public class LocalHBaseCluster {
    */
   public HMaster getActiveMaster() {
     for (JVMClusterUtil.MasterThread mt : masterThreads) {
-      if (mt.getMaster().isActiveMaster()) {
-        // Ensure that the current active master is not stopped.
-        // We don't want to return a stopping master as an active master.
-        if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
-          return mt.getMaster();
-        }
-      }
+      // Ensure that the current active master is not stopped.
+      // We don't want to return a stopping master as an active master.
+      if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+        return mt.getMaster();
+      }
     }
     return null;
   }
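The hunk above reads like a fix for findbugs' repeated-conditional-test warning (RpC_REPEATED_CONDITIONAL_TEST): the outer isActiveMaster() check duplicated the first clause of the inner condition, so the two ifs collapse into one with no change in behavior.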


@@ -614,8 +614,8 @@ public final class Constraints {
     @Override
     public int compare(Constraint c1, Constraint c2) {
       // compare the priorities of the constraints stored in their configuration
-      return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-          .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+      return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+          c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
     }
   };
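Both this hunk and the ReplicationSource one below swap a boxed comparison for the primitive Long.compare, the usual cure for findbugs' boxed-primitive-for-compare complaint: Long.valueOf allocates a Long and compareTo(Long) auto-boxes its argument, while Long.compare works on the primitives directly. A minimal standalone sketch (class and variable names are invented for illustration):

    public class CompareDemo {
      public static void main(String[] args) {
        long p1 = 5L, p2 = 3L;
        // Flagged form: boxes p1, then auto-boxes p2 to satisfy compareTo(Long).
        int viaBoxing = Long.valueOf(p1).compareTo(p2);
        // Fixed form: same negative/zero/positive contract, no allocation.
        int viaCompare = Long.compare(p1, p2);
        System.out.println(viaBoxing + " " + viaCompare);
      }
    }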


@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -572,7 +573,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
         getDataSetWatchFailure(path);
         return;
       }
-      data = watcher.getRecoverableZooKeeper().removeMetaData(data);
+      data = RecoverableZooKeeper.removeMetaData(data);
       getDataSetWatchSuccess(path, data);
     }
   }
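The RecoverableZooKeeper.removeMetaData(data) form, together with the new import above, indicates removeMetaData is a static method that was previously reached through watcher.getRecoverableZooKeeper(); invoking statics through an instance chain is a standard static-analysis complaint because the call site implies instance state that is never actually used. A hypothetical illustration (StaticCallDemo, Codec, and strip are invented names):

    public class StaticCallDemo {
      static class Codec {
        // Invented stand-in for a static utility like removeMetaData.
        static byte[] strip(byte[] in) { return in; }
      }

      public static void main(String[] args) {
        byte[] data = {1, 2, 3};
        byte[] viaInstance = new Codec().strip(data); // compiles, but implies instance state
        byte[] viaClass = Codec.strip(data);          // preferred: no instance involved
        System.out.println(viaInstance.length + " " + viaClass.length);
      }
    }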


@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
       boolean start) throws IOException {
     String[] dirList = dir.list();
+    if (dirList == null) {
+      return;
+    }
     for (String aDirList : dirList) {
       File f = new File(dir, aDirList);
       if (!f.isHidden()) {
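The null guard matters because File.list() is documented to return null, not an empty array, when the pathname is not a directory or an I/O error occurs; looping over the unchecked result is a textbook findbugs null-dereference finding (NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE). A self-contained sketch of the same defensive pattern, with ListDemo and entriesOf invented names:

    import java.io.File;
    import java.io.IOException;

    public final class ListDemo {
      // File.list() yields null for non-directories and on I/O failure,
      // so check before iterating rather than risk a NullPointerException.
      static String[] entriesOf(File dir) throws IOException {
        String[] names = dir.list();
        if (names == null) {
          throw new IOException("Unable to list contents of " + dir);
        }
        return names;
      }

      public static void main(String[] args) throws IOException {
        for (String name : entriesOf(new File("."))) {
          System.out.println(name);
        }
      }
    }

The patch itself returns silently instead of throwing, which treats an unlistable directory as having nothing to zip.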


@@ -1026,12 +1026,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
       final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
       throws IOException {
-    final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
-    for (LoadQueueItem lqi : lqis) {
-      if (!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-        famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-      }
-    }
     try {
       List<LoadQueueItem> toRetry = new ArrayList<>();
       Configuration conf = getConf();
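The deleted famPaths block populated a list that nothing remaining in the method reads, which matches findbugs' dead-local-store finding (DLS_DEAD_LOCAL_STORE): the work was pure overhead, presumably left behind after the request construction moved elsewhere, likely into the ClientServiceCallable.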


@@ -134,11 +134,6 @@ public class DeadServer {
     assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";
-    if (numProcessing < 0) {
-      LOG.error("Number of dead servers in processing = " + numProcessing
-          + ". Something went wrong, this should always be non-negative.");
-      numProcessing = 0;
-    }
     if (numProcessing == 0) { processing = false; }
   }
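Here the removed branch restated the invariant that the assert immediately above already enforces; log-and-clamp recovery for a condition the assertion declares impossible is the sort of contradictory, effectively dead code findbugs flags, so the assert is left as the single statement of the invariant.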


@@ -2858,7 +2858,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     checkResources();
     startRegionOperation(Operation.DELETE);
     try {
-      delete.getRow();
       // All edits for the given row (across all column families) must happen atomically.
       doBatchMutate(delete);
     } finally {
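The lone delete.getRow() discarded its return value and has no side effects, the exact shape of findbugs' ignored-return-value warning (RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT), so the statement can simply go.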
@@ -3192,9 +3191,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * In here we also handle replay of edits on region recover.
    * @return Change in size brought about by applying <code>batchOp</code>
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK",
-      justification="Findbugs seems to be confused on this.")
-  @SuppressWarnings("unchecked")
   // TODO: This needs a rewrite. Doesn't have to be this long. St.Ack 20160120
   private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
     boolean replay = batchOp.isInReplay();
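Dropping the UL_UNRELEASED_LOCK suppression (and the unchecked one) suggests findbugs no longer raises those reports against doMiniBatchMutate, presumably after earlier refactoring of its locking; stale suppressions are worth deleting because they would silently hide any future regression they happen to match.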


@@ -505,7 +505,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
       @Override
       public int compare(Path o1, Path o2) {
-        return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
+        return Long.compare(getTS(o1), getTS(o2));
       }
   /**
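Same pattern as the Constraints hunk above: Long.compare on the two primitive timestamps replaces the boxed compareTo, as sketched after that hunk.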


@@ -1081,10 +1081,11 @@ public final class Canary implements Tool {
           }
         }
         Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-        for (String tableName : this.configuredReadTableTimeouts.keySet()) {
+        for (Map.Entry<String, Long> entry : configuredReadTableTimeouts.entrySet()) {
+          String tableName = entry.getKey();
           if (actualReadTableLatency.containsKey(tableName)) {
             Long actual = actualReadTableLatency.get(tableName).longValue();
-            Long configured = this.configuredReadTableTimeouts.get(tableName);
+            Long configured = entry.getValue();
             LOG.info("Read operation for " + tableName + " took " + actual +
                 " ms. The configured read timeout was " + configured + " ms.");
             if (actual > configured) {
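This last change is the canonical fix for findbugs' WMI_WRONG_MAP_ITERATOR: iterating keySet() and calling get(key) in the loop body costs a redundant hash lookup per element, while entrySet() delivers key and value together. A standalone sketch (MapIterDemo and the map contents are invented):

    import java.util.HashMap;
    import java.util.Map;

    public class MapIterDemo {
      public static void main(String[] args) {
        Map<String, Long> timeouts = new HashMap<>();
        timeouts.put("table1", 100L);

        // Flagged: every iteration pays an extra hash lookup for get().
        for (String name : timeouts.keySet()) {
          System.out.println(name + " -> " + timeouts.get(name));
        }

        // Fixed: the entry already carries both key and value.
        for (Map.Entry<String, Long> e : timeouts.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue());
        }
      }
    }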