HBASE-18308 Eliminate the findbugs warnings for hbase-server

parent d0e4a643a0
commit 3574757f74

@@ -309,12 +309,10 @@ public class LocalHBaseCluster {
    */
   public HMaster getActiveMaster() {
     for (JVMClusterUtil.MasterThread mt : masterThreads) {
-      if (mt.getMaster().isActiveMaster()) {
-        // Ensure that the current active master is not stopped.
-        // We don't want to return a stopping master as an active master.
-        if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
-          return mt.getMaster();
-        }
-      }
+      // Ensure that the current active master is not stopped.
+      // We don't want to return a stopping master as an active master.
+      if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+        return mt.getMaster();
+      }
     }
     return null;
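
Note: no behavior changes here. The inner condition already repeats `isActiveMaster()`, so the outer wrapper tests nothing new, which is presumably the repeated-condition warning findbugs raised. A minimal sketch of the same shape, using a hypothetical `Worker` class rather than the HBase types:

public class Worker {
  private volatile boolean active;
  private volatile boolean stopped;

  // Before: the outer test is repeated inside the inner condition.
  Worker pickBefore(Worker w) {
    if (w.active) {
      if (w.active && !w.stopped) { // w.active is already known true here
        return w;
      }
    }
    return null;
  }

  // After: one combined guard says the same thing.
  Worker pickAfter(Worker w) {
    if (w.active && !w.stopped) {
      return w;
    }
    return null;
  }
}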

@@ -614,8 +614,8 @@ public final class Constraints {
     @Override
     public int compare(Constraint c1, Constraint c2) {
       // compare the priorities of the constraints stored in their configuration
-      return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-          .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+      return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+          c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
     }
   };
 
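
`Long.valueOf(a).compareTo(b)` boxes both operands (the `compareTo` argument is auto-boxed as well), while `Long.compare(a, b)` performs the same comparison on primitives with no allocation; this is the boxed-comparison pattern findbugs flags. The identical rewrite shows up again in the ReplicationSource hunk further down. A standalone sketch with a hypothetical `CompareDemo` class:

public class CompareDemo {
  public static void main(String[] args) {
    long a = 5L, b = 7L;

    // Boxing version: allocates a Long for 'a' and auto-boxes 'b'.
    int boxed = Long.valueOf(a).compareTo(b);

    // Primitive version: no allocation, same contract (negative/zero/positive).
    int primitive = Long.compare(a, b);

    System.out.println(boxed + " == " + primitive); // -1 == -1
  }
}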

@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;

@@ -572,7 +573,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
         getDataSetWatchFailure(path);
         return;
       }
-      data = watcher.getRecoverableZooKeeper().removeMetaData(data);
+      data = RecoverableZooKeeper.removeMetaData(data);
       getDataSetWatchSuccess(path, data);
     }
   }
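
The new call goes through the class name, which implies `removeMetaData` is a static member of `RecoverableZooKeeper`; routing a static call through an instance reference compiles, but it reads as if instance state were involved, which is why analyzers flag it. A minimal sketch of both call styles, with a hypothetical `Codec` class:

public class Codec {
  // A static utility: it uses no instance state.
  static byte[] stripHeader(byte[] data) {
    if (data == null || data.length < 4) {
      return data;
    }
    byte[] out = new byte[data.length - 4];
    System.arraycopy(data, 4, out, 0, out.length);
    return out;
  }

  public static void main(String[] args) {
    Codec codec = new Codec();
    byte[] payload = {0, 0, 0, 0, 42};

    // Misleading: looks like an instance call, but dispatches statically
    // on the declared type of 'codec'.
    byte[] viaInstance = codec.stripHeader(payload);

    // Clear: the class name signals that no instance state is involved.
    byte[] viaClass = Codec.stripHeader(payload);

    System.out.println(viaInstance[0] + " == " + viaClass[0]); // 42 == 42
  }
}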

@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
       boolean start) throws IOException {
     String[] dirList = dir.list();
+    if (dirList == null) {
+      return;
+    }
     for (String aDirList : dirList) {
       File f = new File(dir, aDirList);
       if (!f.isHidden()) {
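
`File.list()` returns `null` rather than an empty array when the path is not a directory or an I/O error occurs, so iterating the result unchecked can throw `NullPointerException`; findbugs reports the unchecked use as a possible null dereference. A self-contained sketch:

import java.io.File;

public class ListDemo {
  public static void main(String[] args) {
    // A path that is not a directory: list() returns null, not an empty array.
    File notADir = new File("no-such-directory-hopefully");

    String[] entries = notADir.list();
    if (entries == null) {
      System.out.println("list() returned null; skipping");
      return;
    }
    for (String name : entries) {
      System.out.println(name);
    }
  }
}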

@@ -1026,12 +1026,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   protected List<LoadQueueItem> tryAtomicRegionLoad(ClientServiceCallable<byte[]> serviceCallable,
       final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
       throws IOException {
-    final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
-    for (LoadQueueItem lqi : lqis) {
-      if (!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-        famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-      }
-    }
     try {
       List<LoadQueueItem> toRetry = new ArrayList<>();
       Configuration conf = getConf();
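
This hunk shows only a deletion, so the motivation isn't visible from the excerpt; deletions of this shape commonly clear findbugs' dead-store/useless-object warnings, where a collection is populated but never read afterwards. A generic sketch of that pattern, unrelated to the HBase method (hypothetical `DeadStoreDemo`):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DeadStoreDemo {
  static int countShort(List<String> names) {
    // Useless object: 'paths' is populated but never read afterwards,
    // which findbugs flags as a dead store / useless object.
    List<String> paths = new ArrayList<>(names.size());
    for (String n : names) {
      if (n.length() < 8) {
        paths.add(n + ".hfile");
      }
    }

    // Only this result is actually used; deleting the block above
    // changes nothing observable.
    int count = 0;
    for (String n : names) {
      if (n.length() < 8) {
        count++;
      }
    }
    return count;
  }

  public static void main(String[] args) {
    System.out.println(countShort(Arrays.asList("a", "averylongname"))); // 1
  }
}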

@@ -134,11 +134,6 @@ public class DeadServer {
 
     assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";
 
-    if (numProcessing < 0) {
-      LOG.error("Number of dead servers in processing = " + numProcessing
-          + ". Something went wrong, this should always be non-negative.");
-      numProcessing = 0;
-    }
     if (numProcessing == 0) { processing = false; }
   }
 
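
Once the assert states `numProcessing >= 0`, the `numProcessing < 0` recovery branch contradicts it: with assertions enabled the assert fires first, and static analysis typically treats the asserted condition as established, marking the branch dead. The commit keeps the assert and drops the recovery code. A compact sketch of both shapes, with a hypothetical `Counter` class:

public class Counter {
  private int numProcessing;

  // Before: the assert and the if test the same condition, so the branch
  // below can never run while assertions are enabled, and analyzers
  // flag it as redundant.
  synchronized void finishBefore() {
    numProcessing--;
    assert numProcessing >= 0 : "should always be non-negative";
    if (numProcessing < 0) { // redundant with the assert above
      numProcessing = 0;
    }
  }

  // After: assert only, matching the committed fix.
  synchronized void finishAfter() {
    numProcessing--;
    assert numProcessing >= 0 : "should always be non-negative";
  }
}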

@@ -2858,7 +2858,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     checkResources();
     startRegionOperation(Operation.DELETE);
     try {
-      delete.getRow();
       // All edits for the given row (across all column families) must happen atomically.
       doBatchMutate(delete);
     } finally {
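
`delete.getRow()` is a plain getter whose result was discarded, so the call was a no-op; findbugs flags ignored return values from side-effect-free methods. A tiny sketch with a hypothetical `Row` class:

public class Row {
  private final byte[] row;

  Row(byte[] row) {
    this.row = row;
  }

  byte[] getRow() {
    return row;
  }

  public static void main(String[] args) {
    Row r = new Row(new byte[] {1});

    // No-op: the getter has no side effects and its result is discarded
    // (findbugs: return value ignored). Deleting this line changes nothing.
    r.getRow();

    byte[] used = r.getRow(); // a real use keeps the call meaningful
    System.out.println(used.length); // 1
  }
}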

@@ -3192,9 +3191,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * In here we also handle replay of edits on region recover.
    * @return Change in size brought about by applying <code>batchOp</code>
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK",
-    justification="Findbugs seems to be confused on this.")
-  @SuppressWarnings("unchecked")
   // TODO: This needs a rewrite. Doesn't have to be this long. St.Ack 20160120
   private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
     boolean replay = batchOp.isInReplay();

@@ -505,7 +505,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
 
     @Override
     public int compare(Path o1, Path o2) {
-      return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
+      return Long.compare(getTS(o1), getTS(o2));
     }
 
   /**

@@ -1081,10 +1081,11 @@ public final class Canary implements Tool {
       }
     }
     Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-    for (String tableName : this.configuredReadTableTimeouts.keySet()) {
+    for (Map.Entry<String, Long> entry : configuredReadTableTimeouts.entrySet()) {
+      String tableName = entry.getKey();
       if (actualReadTableLatency.containsKey(tableName)) {
         Long actual = actualReadTableLatency.get(tableName).longValue();
-        Long configured = this.configuredReadTableTimeouts.get(tableName);
+        Long configured = entry.getValue();
         LOG.info("Read operation for " + tableName + " took " + actual +
             " ms. The configured read timeout was " + configured + " ms.");
         if (actual > configured) {
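
The Canary loop previously iterated `keySet()` and called `get(...)` inside the body, paying an extra map lookup per key; findbugs reports this as WMI_WRONG_MAP_ITERATOR. Iterating `entrySet()` yields key and value together. A standalone sketch with a hypothetical `MapIterDemo` class:

import java.util.HashMap;
import java.util.Map;

public class MapIterDemo {
  public static void main(String[] args) {
    Map<String, Long> timeouts = new HashMap<>();
    timeouts.put("t1", 100L);
    timeouts.put("t2", 250L);

    // Flagged pattern: one extra hash lookup per iteration to fetch the value.
    for (String table : timeouts.keySet()) {
      Long configured = timeouts.get(table);
      System.out.println(table + " -> " + configured);
    }

    // Fixed pattern: entrySet() hands back key and value together.
    for (Map.Entry<String, Long> e : timeouts.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}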