diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b98078ad3e0..42484e71c94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -313,12 +313,10 @@ public class LocalHBaseCluster {
    */
   public HMaster getActiveMaster() {
     for (JVMClusterUtil.MasterThread mt : masterThreads) {
-      if (mt.getMaster().isActiveMaster()) {
-        // Ensure that the current active master is not stopped.
-        // We don't want to return a stopping master as an active master.
-        if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
-          return mt.getMaster();
-        }
+      // Ensure that the current active master is not stopped.
+      // We don't want to return a stopping master as an active master.
+      if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+        return mt.getMaster();
       }
     }
     return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 85ef71781a1..c96bf3d1a7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -616,8 +616,8 @@ public final class Constraints {
     @Override
     public int compare(Constraint c1, Constraint c2) {
       // compare the priorities of the constraints stored in their configuration
-      return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-        .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+      return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+        c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
     }
   };
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
index dfbe648d9a1..e0421d96ba3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start)
     throws IOException {
     String[] dirList = dir.list();
+    if (dirList == null) {
+      return;
+    }
     for (String aDirList : dirList) {
       File f = new File(dir, aDirList);
       if (!f.isHidden()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
index c33cdcc951f..81accd2215a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DeadServer.java
@@ -134,11 +134,6 @@ public class DeadServer {
     assert numProcessing >= 0: "Number of dead servers in processing should always be non-negative";

-    if (numProcessing < 0) {
-      LOG.error("Number of dead servers in processing = " + numProcessing
-        + ". Something went wrong, this should always be non-negative.");
-      numProcessing = 0;
-    }
     if (numProcessing == 0) {
       processing = false;
     }
   }
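The JarFinder change above guards a real contract: File.list() returns null, not an empty array, when the path is not a directory or an I/O error occurs. A minimal standalone sketch of the same guard (class and method names are hypothetical, not from the patch):

```java
import java.io.File;

public class ListDirSketch {
  // File.list() returns null (not an empty array) if the path is not a
  // directory or if an I/O error occurs, so the result must be null-checked.
  static void printEntries(File dir) {
    String[] entries = dir.list();
    if (entries == null) {
      return; // nothing to iterate: not a directory, or listing failed
    }
    for (String name : entries) {
      System.out.println(name);
    }
  }

  public static void main(String[] args) {
    printEntries(new File("."));
  }
}
```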
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 3a2926cb82b..93e532bdc93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -709,9 +709,8 @@ public class ServerManager {
     if (!services.getAssignmentManager().isFailoverCleanupDone()) {
       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
     }
-
-    for(ServerName tmpServerName : requeuedDeadServers.keySet()){
-      processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
+    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
+      processDeadServer(entry.getKey(), entry.getValue());
     }
     requeuedDeadServers.clear();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index b6b317d6db7..3bb12cee448 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1096,6 +1096,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   protected MetricsBalancer metricsBalancer = null;
   protected ClusterStatus clusterStatus = null;
   protected ServerName masterServerName;
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+    justification="services is assigned just once when the master starts")
   protected MasterServices services;

   protected static String[] getTablesOnMaster(Configuration conf) {
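The BaseLoadBalancer hunk suppresses one specific FindBugs pattern on one field rather than silencing the whole class. Roughly how the annotation reads in isolation (the field and justification below are illustrative; the annotation type comes from the FindBugs/SpotBugs annotations artifact, which must be on the compile classpath):

```java
// Illustrative only: a field written once at startup and later read without
// synchronization trips IS2_INCONSISTENT_SYNC; the justification string
// documents why the report is a false positive instead of hiding it blindly.
public class StartupAssignedField {
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
      justification = "Field is assigned once at startup and never rewritten")
  protected Object services;
}
```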
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index fc737a96f35..fe2a16a85ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2816,7 +2816,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     checkResources();
     startRegionOperation(Operation.DELETE);
     try {
-      delete.getRow();
       // All edits for the given row (across all column families) must happen atomically.
       doBatchMutate(delete);
     } finally {
@@ -3148,7 +3147,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }

-  @SuppressWarnings("unchecked")
   private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp) throws IOException {
     boolean isInReplay = batchOp.isInReplay();
     // variable to note if all Put items are for the same CF -- metrics related
@@ -3402,7 +3400,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // They don't have to be, it will still work, just write more WALEdits than needed.
         if (nonceGroup != currentNonceGroup || nonce != currentNonce) {
           if (walEdit.size() > 0) {
-            assert isInReplay;
             if (!isInReplay) {
               throw new IOException("Multiple nonces per batch and not in replay");
             }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 849eba4e384..107e1c311ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -816,7 +816,8 @@ public class HRegionServer extends HasThread implements
    * @throws IOException
    * @throws InterruptedException
    */
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE",
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+    value={"RV_RETURN_VALUE_IGNORED_BAD_PRACTICE", "RV_RETURN_VALUE_IGNORED"},
     justification="cluster Id znode read would give us correct response")
   private void initializeZooKeeper() throws IOException, InterruptedException {
     // Create the master address tracker, register with zk, and start it. Then
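The third HRegion hunk drops `assert isInReplay;`, which merely restated the explicit check on the next line. Asserts only run when the JVM is started with -ea, so an invariant that must hold in production needs an ordinary check and exception. A sketch of the surviving pattern (class and method names hypothetical):

```java
import java.io.IOException;

public class NonceInvariantSketch {
  // Unlike an assert, this check runs regardless of JVM flags, so the
  // invariant is enforced in production as well as under test.
  static void requireReplay(boolean isInReplay) throws IOException {
    if (!isInReplay) {
      throw new IOException("Multiple nonces per batch and not in replay");
    }
  }
}
```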
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
index b206055e835..2ec34834722 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ExplicitColumnTracker.java
@@ -137,15 +137,13 @@ public class ExplicitColumnTracker implements ColumnTracker {
       // is interested in. That means there is no more data for the column
       // of interest. Advance the ExplicitColumnTracker state to next
       // column of interest, and check again.
-      if (ret <= -1) {
-        ++this.index;
-        if (done()) {
-          // No more to match, do not include, done with this row.
-          return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
-        }
-        // This is the recursive case.
-        this.column = this.columns[this.index];
+      ++this.index;
+      if (done()) {
+        // No more to match, do not include, done with this row.
+        return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
       }
+      // This is the recursive case.
+      this.column = this.columns[this.index];
     } while (true);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 8378b9bfe7e..3278f0cfa5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -461,7 +461,7 @@ public class ReplicationSource extends Thread implements ReplicationSourceInterf
       @Override
       public int compare(Path o1, Path o2) {
-        return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
+        return Long.compare(getTS(o1), getTS(o2));
       }

       /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 92c47c72007..60c20799b93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -1077,10 +1077,11 @@ public final class Canary implements Tool {
         }
       }
       Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-      for (String tableName : this.configuredReadTableTimeouts.keySet()) {
+      for (Map.Entry<String, Long> entry : configuredReadTableTimeouts.entrySet()) {
+        String tableName = entry.getKey();
         if (actualReadTableLatency.containsKey(tableName)) {
           Long actual = actualReadTableLatency.get(tableName).longValue();
-          Long configured = this.configuredReadTableTimeouts.get(tableName);
+          Long configured = entry.getValue();
           LOG.info("Read operation for " + tableName + " took " + actual
             + " ms. The configured read timeout was " + configured + " ms.");
           if (actual > configured) {
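Both the Constraints and ReplicationSource comparators switch from Long.valueOf(a).compareTo(b) to Long.compare(a, b). The two forms impose the same ordering, but the boxed form allocates a wrapper object on every call. A small self-contained demonstration (values are made up):

```java
public class CompareSketch {
  public static void main(String[] args) {
    long a = 1510000000000L; // e.g. two WAL file timestamps
    long b = 1520000000000L;
    // Boxed form: allocates a Long purely to compare two primitives.
    int boxed = Long.valueOf(a).compareTo(b);
    // Primitive form: identical contract (negative, zero, or positive).
    int primitive = Long.compare(a, b);
    System.out.println(boxed + " == " + primitive); // -1 == -1
  }
}
```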
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 293820b1b85..005e948089a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1769,12 +1769,13 @@ public class WALSplitter {
       int maxSize = 0;
       List<Pair<HRegionLocation, Entry>> maxQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String key : this.serverToBufferQueueMap.keySet()) {
-          List<Pair<HRegionLocation, Entry>> curQueue = this.serverToBufferQueueMap.get(key);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+            serverToBufferQueueMap.entrySet()) {
+          List<Pair<HRegionLocation, Entry>> curQueue = entry.getValue();
           if (curQueue.size() > maxSize) {
             maxSize = curQueue.size();
             maxQueue = curQueue;
-            maxLocKey = key;
+            maxLocKey = entry.getKey();
           }
         }
         if (maxSize < minBatchSize
@@ -2065,8 +2066,10 @@ public class WALSplitter {
       int curSize = 0;
       List<Pair<HRegionLocation, Entry>> curQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String locationKey : this.serverToBufferQueueMap.keySet()) {
-          curQueue = this.serverToBufferQueueMap.get(locationKey);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+            serverToBufferQueueMap.entrySet()) {
+          String locationKey = entry.getKey();
+          curQueue = entry.getValue();
           if (!curQueue.isEmpty()) {
             curSize = curQueue.size();
             curLoc = locationKey;
@@ -2144,8 +2147,9 @@ public class WALSplitter {
         }
       } finally {
         synchronized (writers) {
-          for (String locationKey : writers.keySet()) {
-            RegionServerWriter tmpW = writers.get(locationKey);
+          for (Map.Entry<String, RegionServerWriter> entry : writers.entrySet()) {
+            String locationKey = entry.getKey();
+            RegionServerWriter tmpW = entry.getValue();
             try {
               tmpW.close();
             } catch (IOException ioe) {
@@ -2157,8 +2161,10 @@ public class WALSplitter {

         // close connections
         synchronized (this.tableNameToHConnectionMap) {
-          for (TableName tableName : this.tableNameToHConnectionMap.keySet()) {
-            HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
+          for (Map.Entry<TableName, HConnection> entry :
+              tableNameToHConnectionMap.entrySet()) {
+            TableName tableName = entry.getKey();
+            HConnection hconn = entry.getValue();
             try {
               hconn.clearRegionCache();
               hconn.close();
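Every keySet()-plus-get() loop in this patch (ServerManager, Canary, WALSplitter) becomes an entrySet() loop: iterating keys and then calling get() performs a second hash lookup per element, while entrySet() delivers key and value together. A self-contained sketch with hypothetical data:

```java
import java.util.HashMap;
import java.util.Map;

public class EntrySetSketch {
  public static void main(String[] args) {
    Map<String, Integer> queueSizes = new HashMap<>();
    queueSizes.put("server-a", 3);
    queueSizes.put("server-b", 7);

    // Before: one extra hash lookup per iteration just to fetch the value.
    for (String key : queueSizes.keySet()) {
      System.out.println(key + " -> " + queueSizes.get(key));
    }
    // After: key and value arrive together, no extra lookup.
    for (Map.Entry<String, Integer> entry : queueSizes.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}
```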