HBASE-13760 Cleanup Findbugs keySet iterator warnings. (Gabor Liptak)

anoopsjohn 2015-05-25 22:11:49 +05:30
parent d45e0a7d41
commit 9ee184b673
4 changed files with 27 additions and 25 deletions
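
The pattern behind every hunk below is the FindBugs warning WMI_WRONG_MAP_ITERATOR ("Inefficient use of keySet iterator instead of entrySet iterator"): a loop that walks a map's keySet() and calls get(key) on each pass performs a redundant lookup per entry, whereas iterating entrySet() yields the key and value together. A minimal before/after sketch of the pattern, assuming a plain java.util.HashMap (the class name and map contents are illustrative, not from the patch):

import java.util.HashMap;
import java.util.Map;

public class KeySetIteratorExample {
  public static void main(String[] args) {
    Map<String, Integer> counts = new HashMap<String, Integer>();
    counts.put("rowA", 1);
    counts.put("rowB", 2);

    // Before: flagged by FindBugs, because counts.get(key) repeats the
    // lookup work the iterator has already done.
    for (String key : counts.keySet()) {
      System.out.println(key + " = " + counts.get(key));
    }

    // After: entrySet() hands back the key and value in one step.
    for (Map.Entry<String, Integer> entry : counts.entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}

The same swap is applied mechanically in each file below: where only the value is needed the get() call disappears, and where both key and value are needed no second lookup is issued.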

HColumnDescriptor.java

@@ -920,13 +920,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     boolean hasConfigKeys = false;
     // print all reserved keys first
-    for (Bytes k : values.keySet()) {
-      if (!RESERVED_KEYWORDS.contains(k)) {
+    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
+      if (!RESERVED_KEYWORDS.contains(entry.getKey())) {
         hasConfigKeys = true;
         continue;
       }
-      String key = Bytes.toString(k.get());
-      String value = Bytes.toStringBinary(values.get(k).get());
+      String key = Bytes.toString(entry.getKey().get());
+      String value = Bytes.toStringBinary(entry.getValue().get());
       if (printDefaults
           || !DEFAULT_VALUES.containsKey(key)
           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {

HTableDescriptor.java

@@ -886,16 +886,16 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
     // step 1: set partitioning and pruning
     Set<Bytes> reservedKeys = new TreeSet<Bytes>();
     Set<Bytes> userKeys = new TreeSet<Bytes>();
-    for (Bytes k : values.keySet()) {
-      if (k == null || k.get() == null) continue;
-      String key = Bytes.toString(k.get());
+    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
+      if (entry.getKey() == null || entry.getKey().get() == null) continue;
+      String key = Bytes.toString(entry.getKey().get());
       // in this section, print out reserved keywords + coprocessor info
-      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
-        userKeys.add(k);
+      if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
+        userKeys.add(entry.getKey());
         continue;
       }
       // only print out IS_ROOT/IS_META if true
-      String value = Bytes.toString(values.get(k).get());
+      String value = Bytes.toString(entry.getValue().get());
       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
         if (Boolean.valueOf(value) == false) continue;
       }
@@ -903,7 +903,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
       if (printDefaults
           || !DEFAULT_VALUES.containsKey(key)
           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
-        reservedKeys.add(k);
+        reservedKeys.add(entry.getKey());
       }
     }

ServerManager.java

@@ -677,8 +677,8 @@ public class ServerManager {
       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
     }
-    for(ServerName tmpServerName : requeuedDeadServers.keySet()){
-      processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
+    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
+      processDeadServer(entry.getKey(), entry.getValue());
     }
     requeuedDeadServers.clear();
   }

WALSplitter.java

@@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -1673,12 +1672,13 @@ public class WALSplitter {
       int maxSize = 0;
       List<Pair<HRegionLocation, Entry>> maxQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String key : this.serverToBufferQueueMap.keySet()) {
-          List<Pair<HRegionLocation, Entry>> curQueue = this.serverToBufferQueueMap.get(key);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+            this.serverToBufferQueueMap.entrySet()) {
+          List<Pair<HRegionLocation, Entry>> curQueue = entry.getValue();
           if (curQueue.size() > maxSize) {
             maxSize = curQueue.size();
             maxQueue = curQueue;
-            maxLocKey = key;
+            maxLocKey = entry.getKey();
           }
         }
         if (maxSize < minBatchSize
@@ -1969,11 +1969,12 @@ public class WALSplitter {
       int curSize = 0;
       List<Pair<HRegionLocation, Entry>> curQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String locationKey : this.serverToBufferQueueMap.keySet()) {
-          curQueue = this.serverToBufferQueueMap.get(locationKey);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+            this.serverToBufferQueueMap.entrySet()) {
+          curQueue = entry.getValue();
           if (!curQueue.isEmpty()) {
             curSize = curQueue.size();
-            curLoc = locationKey;
+            curLoc = entry.getKey();
             break;
           }
         }
@@ -2043,12 +2044,12 @@ public class WALSplitter {
           }
         } finally {
           synchronized (writers) {
-            for (String locationKey : writers.keySet()) {
-              RegionServerWriter tmpW = writers.get(locationKey);
+            for (Map.Entry<String, RegionServerWriter> entry : writers.entrySet()) {
+              RegionServerWriter tmpW = entry.getValue();
               try {
                 tmpW.close();
               } catch (IOException ioe) {
-                LOG.error("Couldn't close writer for region server:" + locationKey, ioe);
+                LOG.error("Couldn't close writer for region server:" + entry.getKey(), ioe);
                 result.add(ioe);
               }
             }
@@ -2056,8 +2057,9 @@ public class WALSplitter {
           // close connections
           synchronized (this.tableNameToHConnectionMap) {
-            for (TableName tableName : this.tableNameToHConnectionMap.keySet()) {
-              HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
+            for (Map.Entry<TableName,HConnection> entry :
+                this.tableNameToHConnectionMap.entrySet()) {
+              HConnection hconn = entry.getValue();
               try {
                 hconn.clearRegionCache();
                 hconn.close();