From 8575edd908da86a1f515599506fbf5abf4aa9329 Mon Sep 17 00:00:00 2001
From: Jonathan Hsieh
Date: Wed, 11 Apr 2012 20:04:12 +0000
Subject: [PATCH] HBASE-5645 [findbugs] Fix correctness warnings (David S Wang
 and Uma Maheswara Rao G)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1324969 13f79535-47bb-0310-9956-ffa450edef68
---
 dev-support/findHangingTest.sh                        |  1 -
 dev-support/test-patch.properties                     |  2 +-
 .../java/org/apache/hadoop/hbase/io/hfile/HFile.java  |  4 +---
 .../apache/hadoop/hbase/mapreduce/CellCounter.java    | 12 ++++++------
 .../hadoop/hbase/master/AssignmentManager.java        |  3 +--
 .../hadoop/hbase/master/DefaultLoadBalancer.java      |  4 ++--
 .../java/org/apache/hadoop/hbase/master/HMaster.java  |  2 +-
 .../apache/hadoop/hbase/master/ServerManager.java     |  2 +-
 .../hadoop/hbase/regionserver/MemStoreLAB.java        |  5 ++---
 .../apache/hadoop/hbase/regionserver/StoreFile.java   |  2 +-
 .../replication/regionserver/ReplicationSource.java   |  5 -----
 .../apache/hadoop/hbase/util/FSTableDescriptors.java  |  2 +-
 .../java/org/apache/hadoop/hbase/util/FSUtils.java    |  2 +-
 .../org/apache/hadoop/hbase/util/JVMClusterUtil.java  |  6 +++---
 .../java/org/apache/hadoop/hbase/util/PoolMap.java    |  4 ++--
 15 files changed, 23 insertions(+), 33 deletions(-)

diff --git a/dev-support/findHangingTest.sh b/dev-support/findHangingTest.sh
index 4518c680ece..f7ebe47f093 100755
--- a/dev-support/findHangingTest.sh
+++ b/dev-support/findHangingTest.sh
@@ -38,4 +38,3 @@ cat jenkins.out | while read line; do
     prevLine=$line
   fi
 done
-rm jenkins.out
diff --git a/dev-support/test-patch.properties b/dev-support/test-patch.properties
index 5294e449601..ba088da3996 100644
--- a/dev-support/test-patch.properties
+++ b/dev-support/test-patch.properties
@@ -19,5 +19,5 @@ MAVEN_OPTS="-Xmx3g"
 
 # Please update the per-module test-patch.properties if you update this file.
 OK_RELEASEAUDIT_WARNINGS=84
-OK_FINDBUGS_WARNINGS=561
+OK_FINDBUGS_WARNINGS=549
 OK_JAVADOC_WARNINGS=169
diff --git a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index c4a8a9e5faf..8e78a6011e3 100644
--- a/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -558,9 +558,7 @@ public class HFile {
         hfs = (HFileSystem)fs;
         // open a stream to read data without checksum verification in
         // the filesystem
-        if (hfs != null) {
-          fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
-        }
+        fsdisNoFsChecksum = hfs.getNoChecksumFs().open(path);
       }
       return pickReaderVersion(path, fsdis, fsdisNoFsChecksum,
           fs.getFileStatus(path).getLen(), closeIStream, cacheConf,
diff --git a/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index 63b2ed5b079..25776be5da1 100644
--- a/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ b/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -127,8 +127,8 @@ public class CellCounter {
               new IntWritable(1));
           context.write(new Text(thisRowFamilyName), new IntWritable(1));
         }
-        String thisRowQualifierName =
-            thisRowFamilyName + separator + Bytes.toStringBinary(value.getQualifier());
+        String thisRowQualifierName = thisRowFamilyName + separator +
+            Bytes.toStringBinary(value.getQualifier());
         if (thisRowQualifierName != null &&
             !thisRowQualifierName.equals(currentQualifierName)) {
           currentQualifierName = thisRowQualifierName;
@@ -139,16 +139,16 @@ public class CellCounter {
           // Intialize versions
           context.getCounter("QL_VERSIONS", currentRowKey + separator +
              thisRowQualifierName).increment(1);
-          context.write(new Text(currentRowKey + separator + thisRowQualifierName +
-              "_Versions"), new IntWritable(1));
+          context.write(new Text(currentRowKey + separator +
+              thisRowQualifierName + "_Versions"), new IntWritable(1));
         } else {
           // Increment versions
           currentQualifierName = thisRowQualifierName;
           context.getCounter("QL_VERSIONS", currentRowKey + separator +
               thisRowQualifierName).increment(1);
-          context.write(new Text(currentRowKey + separator + thisRowQualifierName + "_Versions"),
-              new IntWritable(1));
+          context.write(new Text(currentRowKey + separator +
+              thisRowQualifierName + "_Versions"), new IntWritable(1));
         }
       }
     } catch (InterruptedException e) {
diff --git a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index de4b1e60ad4..ab33ac7be17 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -927,7 +927,7 @@ public class AssignmentManager extends ZooKeeperListener {
       return p.getFirst();
     } catch (IOException e) {
       master.abort("Aborting because error occoured while reading "
-          + data.getRegionName() + " from .META.", e);
+          + Bytes.toStringBinary(data.getRegionName()) + " from .META.", e);
       return null;
     }
   }
@@ -1756,7 +1756,6 @@ public class AssignmentManager extends ZooKeeperListener {
   boolean asyncSetOfflineInZooKeeper(final RegionState state,
       final AsyncCallback.StringCallback cb, final Object ctx) {
     if (!state.isClosed() && !state.isOffline()) {
-      new RuntimeException("Unexpected state trying to OFFLINE; " + state);
       this.master.abort("Unexpected state trying to OFFLINE; " + state,
           new IllegalStateException());
       return false;
diff --git a/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java b/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
index 1f351d2f84a..7071b4d444b 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -691,8 +691,8 @@ public class DefaultLoadBalancer implements LoadBalancer {
             get(Bytes.toString(tableName));
       }
     } catch (FileNotFoundException fnfe) {
-      LOG.debug("FileNotFoundException during getTableDescriptors." +
-        " Current table name = " + tableName , fnfe);
+      LOG.debug("FileNotFoundException during getTableDescriptors." +
+        " Current table name = " + Bytes.toStringBinary(tableName), fnfe);
     }
     return tableDescriptor;
 
diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 659c4af85d6..6b0653020b3 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -256,7 +256,7 @@ Server {
     // Creation of a HSA will force a resolve.
     InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
     if (initialIsa.getAddress() == null) {
-      throw new IllegalArgumentException("Failed resolve of " + this.isa);
+      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
     int numHandlers = conf.getInt("hbase.master.handler.count",
         conf.getInt("hbase.regionserver.handler.count", 25));
diff --git a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 1c253a0345a..b5f4cb6f6e9 100644
--- a/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -533,7 +533,7 @@ public class ServerManager {
    */
   private HRegionInterface getServerConnection(final ServerName sn)
   throws IOException {
-    HRegionInterface hri = this.serverConnections.get(sn.toString());
+    HRegionInterface hri = this.serverConnections.get(sn);
     if (hri == null) {
       LOG.debug("New connection to " + sn.toString());
       hri = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort());
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
index 26948978544..93344df4150 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
@@ -259,9 +259,8 @@ public class MemStoreLAB {
 
     @Override
     public String toString() {
-      return "Allocation(data=" + data +
-        " with capacity=" + data.length +
-        ", off=" + offset + ")";
+      return "Allocation(" + "capacity=" + data.length + ", off=" + offset
+          + ")";
     }
 
     byte[] getData() {
diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 1636bfb5a0b..c33e95110a2 100644
--- a/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -963,7 +963,7 @@ public class StoreFile extends SchemaConfigured {
             .withPath(fs, path)
             .withBlockSize(blocksize)
             .withCompression(compress)
-            .withDataBlockEncoder(dataBlockEncoder)
+            .withDataBlockEncoder(this.dataBlockEncoder)
             .withComparator(comparator.getRawComparator())
             .withChecksumType(checksumType)
             .withBytesPerChecksum(bytesPerChecksum)
diff --git a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 545bd02e45b..04fe8b62085 100644
--- a/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -791,11 +791,6 @@ public class ReplicationSource extends Thread
       return Long.valueOf(getTS(o1)).compareTo(getTS(o2));
     }
 
-    @Override
-    public boolean equals(Object o) {
-      return true;
-    }
-
    /**
     * Split a path to get the start time
     * For example: 10.20.20.171%3A60020.1277499063250
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 62cf6acf350..efb2b843cd6 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -273,7 +273,7 @@ public class FSTableDescriptors implements TableDescriptors {
       Path p = status[i].getPath();
       // Clean up old versions
       if (!fs.delete(p, false)) {
-        LOG.warn("Failed cleanup of " + status);
+        LOG.warn("Failed cleanup of " + p);
       } else {
         LOG.debug("Cleaned up old tableinfo file " + p);
       }
diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index aebe5b025cd..b9c47fc89d3 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -832,7 +832,7 @@ public abstract class FSUtils {
     public boolean accept(Path p) {
       boolean isValid = false;
       try {
-        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) {
+        if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p.toString())) {
          isValid = false;
        } else {
           isValid = this.fs.getFileStatus(p).isDir();
diff --git a/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index c21377c066a..a5d067086a7 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -231,9 +231,8 @@ public class JVMClusterUtil {
         }
       }
     }
-    // regionServerThreads can never be null because they are initialized when
-    // the class is constructed.
-    for(RegionServerThread t: regionservers) {
+    if (regionservers != null) {
+      for (RegionServerThread t : regionservers) {
       if (t.isAlive()) {
         try {
           t.getRegionServer().stop("Shutdown requested");
@@ -243,6 +242,7 @@ public class JVMClusterUtil {
         }
       }
     }
+    }
     if (masters != null) {
       for (JVMClusterUtil.MasterThread t : masters) {
         while (t.master.isAlive()) {
diff --git a/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
index 8e2a8567144..7caf796dcf7 100644
--- a/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
+++ b/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
@@ -192,8 +192,8 @@ public class PoolMap<K, V> implements Map<K, V> {
     for (Map.Entry<K, Pool<V>> poolEntry : pools.entrySet()) {
       final K poolKey = poolEntry.getKey();
       final Pool<V> pool = poolEntry.getValue();
-      for (final V poolValue : pool.values()) {
-        if (pool != null) {
+      if (pool != null) {
+        for (final V poolValue : pool.values()) {
           entries.add(new Map.Entry<K, V>() {
             @Override
             public K getKey() {
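
Several hunks above fix the same class of FindBugs correctness warning: a generic collection queried with an argument whose type can never match its keys or elements, so the check always fails. The ServerManager change (passing sn instead of sn.toString() to serverConnections.get) and the FSUtils change (passing p.toString() instead of p to HBASE_NON_USER_TABLE_DIRS.contains) are both instances. The following is a minimal, self-contained Java sketch of the pattern and its fix; the class and variable names are hypothetical illustrations, not HBase code:

```java
import java.util.HashMap;
import java.util.Map;

public class KeyTypeMismatchDemo {
  public static void main(String[] args) {
    // A map keyed by Integer, analogous to serverConnections keyed by ServerName.
    Map<Integer, String> connections = new HashMap<Integer, String>();
    connections.put(42, "open connection");

    // Bug: Map.get(Object) compiles with any argument, but a String can never
    // equal an Integer key, so this lookup always returns null. FindBugs flags
    // this pattern as GC_UNRELATED_TYPES.
    String wrong = connections.get("42");

    // Fix: query with the map's actual key type, as the patch does by passing
    // the ServerName itself rather than its String form.
    String right = connections.get(42);

    System.out.println(wrong + " / " + right); // prints: null / open connection
  }
}
```

Because these lookups silently return null or false rather than failing to compile, they tend to surface as cache misses or always-empty checks (here, a new region server connection on every call) instead of visible bugs, which is why the static analysis pass is worth keeping clean.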