diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 2cb587bd8f8..20cfbeff898 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -920,13 +920,13 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     boolean hasConfigKeys = false;
 
     // print all reserved keys first
-    for (Bytes k : values.keySet()) {
-      if (!RESERVED_KEYWORDS.contains(k)) {
+    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
+      if (!RESERVED_KEYWORDS.contains(entry.getKey())) {
         hasConfigKeys = true;
         continue;
       }
-      String key = Bytes.toString(k.get());
-      String value = Bytes.toStringBinary(values.get(k).get());
+      String key = Bytes.toString(entry.getKey().get());
+      String value = Bytes.toStringBinary(entry.getValue().get());
       if (printDefaults
           || !DEFAULT_VALUES.containsKey(key)
           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a0a3c796196..f2f00773e68 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -886,16 +886,16 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
     // step 1: set partitioning and pruning
     Set<Bytes> reservedKeys = new TreeSet<>();
     Set<Bytes> userKeys = new TreeSet<>();
-    for (Bytes k : values.keySet()) {
-      if (k == null || k.get() == null) continue;
-      String key = Bytes.toString(k.get());
+    for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) {
+      if (entry.getKey() == null || entry.getKey().get() == null) continue;
+      String key = Bytes.toString(entry.getKey().get());
       // in this section, print out reserved keywords + coprocessor info
-      if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
-        userKeys.add(k);
+      if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) {
+        userKeys.add(entry.getKey());
         continue;
       }
       // only print out IS_ROOT/IS_META if true
-      String value = Bytes.toString(values.get(k).get());
+      String value = Bytes.toString(entry.getValue().get());
       if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
         if (Boolean.valueOf(value) == false) continue;
       }
@@ -903,7 +903,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
       if (printDefaults
           || !DEFAULT_VALUES.containsKey(key)
           || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
-        reservedKeys.add(k);
+        reservedKeys.add(entry.getKey());
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 1ed251465fb..fa99a928b31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -677,8 +677,8 @@ public class ServerManager {
       LOG.info("AssignmentManager hasn't finished failover cleanup; waiting");
     }
 
-    for(ServerName tmpServerName : requeuedDeadServers.keySet()){
-      processDeadServer(tmpServerName, requeuedDeadServers.get(tmpServerName));
+    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
+      processDeadServer(entry.getKey(), entry.getValue());
     }
     requeuedDeadServers.clear();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 2c68a6895a1..b9b84860b2c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -1673,12 +1672,13 @@ public class WALSplitter {
       int maxSize = 0;
       List<Pair<HRegionLocation, Entry>> maxQueue = null;
       synchronized (this.serverToBufferQueueMap) {
-        for (String key : this.serverToBufferQueueMap.keySet()) {
-          List<Pair<HRegionLocation, Entry>> curQueue = this.serverToBufferQueueMap.get(key);
+        for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+            this.serverToBufferQueueMap.entrySet()) {
+          List<Pair<HRegionLocation, Entry>> curQueue = entry.getValue();
           if (curQueue.size() > maxSize) {
             maxSize = curQueue.size();
             maxQueue = curQueue;
-            maxLocKey = key;
+            maxLocKey = entry.getKey();
           }
         }
         if (maxSize < minBatchSize
@@ -1969,11 +1969,12 @@ public class WALSplitter {
         int curSize = 0;
         List<Pair<HRegionLocation, Entry>> curQueue = null;
         synchronized (this.serverToBufferQueueMap) {
-          for (String locationKey : this.serverToBufferQueueMap.keySet()) {
-            curQueue = this.serverToBufferQueueMap.get(locationKey);
+          for (Map.Entry<String, List<Pair<HRegionLocation, Entry>>> entry :
+              this.serverToBufferQueueMap.entrySet()) {
+            curQueue = entry.getValue();
             if (!curQueue.isEmpty()) {
               curSize = curQueue.size();
-              curLoc = locationKey;
+              curLoc = entry.getKey();
               break;
             }
           }
@@ -2043,12 +2044,12 @@ public class WALSplitter {
         }
       } finally {
         synchronized (writers) {
-          for (String locationKey : writers.keySet()) {
-            RegionServerWriter tmpW = writers.get(locationKey);
+          for (Map.Entry<String, RegionServerWriter> entry : writers.entrySet()) {
+            RegionServerWriter tmpW = entry.getValue();
             try {
               tmpW.close();
             } catch (IOException ioe) {
-              LOG.error("Couldn't close writer for region server:" + locationKey, ioe);
+              LOG.error("Couldn't close writer for region server:" + entry.getKey(), ioe);
               result.add(ioe);
             }
           }
@@ -2056,8 +2057,9 @@ public class WALSplitter {
 
       // close connections
       synchronized (this.tableNameToHConnectionMap) {
-        for (TableName tableName : this.tableNameToHConnectionMap.keySet()) {
-          HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
+        for (Map.Entry<TableName, HConnection> entry :
+            this.tableNameToHConnectionMap.entrySet()) {
+          HConnection hconn = entry.getValue();
           try {
             hconn.clearRegionCache();
             hconn.close();
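
Note on the pattern: every hunk above makes the same change, replacing iteration over keySet() plus a per-element get(key) with a single pass over entrySet(), so each key/value pair is fetched once. For a HashMap this avoids recomputing the hash and re-probing the bucket on every element, and for a TreeMap it avoids a full O(log n) lookup per element; it is also the fix FindBugs suggests for its WMI_WRONG_MAP_ITERATOR warning. The following is a minimal standalone sketch of the before/after shapes; the class name and map contents are hypothetical, invented for illustration rather than taken from the patch.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the maps touched by the patch,
// e.g. HColumnDescriptor.values or WALSplitter.serverToBufferQueueMap.
public class EntrySetIteration {
  public static void main(String[] args) {
    Map<String, Integer> queueSizes = new HashMap<>();
    queueSizes.put("server-a", 3);
    queueSizes.put("server-b", 7);

    // Before: each iteration pays for a second map lookup via get(key).
    for (String key : queueSizes.keySet()) {
      System.out.println(key + " -> " + queueSizes.get(key));
    }

    // After: the Map.Entry already carries both key and value,
    // so no extra lookup is needed.
    for (Map.Entry<String, Integer> entry : queueSizes.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}

Iterating keySet() remains appropriate only when the loop body never needs the values; wherever the patch's loops read the value (or, as in ServerManager and WALSplitter, both key and value), entrySet() is the cheaper and more idiomatic choice.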