From 19099c98b12f67747af78ca0b436a899a26467b0 Mon Sep 17 00:00:00 2001
From: Jim Kellerman
Date: Wed, 23 May 2007 15:14:00 +0000
Subject: [PATCH] HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference
 to removed class HLocking that broke main build.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@540973 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES.txt                                   |  2 +
 .../apache/hadoop/hbase/HAbstractScanner.java |  2 +-
 src/java/org/apache/hadoop/hbase/HClient.java | 39 ++++-------
 src/java/org/apache/hadoop/hbase/HLog.java    |  7 +-
 src/java/org/apache/hadoop/hbase/HLogKey.java | 13 ++++
 src/java/org/apache/hadoop/hbase/HMaster.java |  9 ++-
 .../org/apache/hadoop/hbase/HMemcache.java    | 70 ++++++-----------
 src/java/org/apache/hadoop/hbase/HRegion.java | 22 +++---
 .../apache/hadoop/hbase/HRegionServer.java    | 30 ++++----
 .../apache/hadoop/hbase/HRegiondirReader.java |  9 ++-
 .../apache/hadoop/hbase/HServerAddress.java   | 10 +--
 src/java/org/apache/hadoop/hbase/HStore.java  | 35 ++++------
 .../org/apache/hadoop/hbase/HStoreFile.java   | 22 +++---
 .../org/apache/hadoop/hbase/HStoreKey.java    | 18 +++++
 .../apache/hadoop/hbase/HTableDescriptor.java | 18 +++++
 src/java/org/apache/hadoop/hbase/Leases.java  | 14 ++++
 16 files changed, 181 insertions(+), 139 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index 3569757a9d9..65fd5cb1c10 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -12,3 +12,5 @@ Trunk (unreleased changes)
     tolerant.
  6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
     'Performance Evaluation', etc.
+ 7. HADOOP-1420, HADOOP-1423. Findbugs changes, remove reference to removed
+    class HLocking.
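[Editor's note -- not part of the original patch] The hunks that follow repeat a
handful of Findbugs cleanups. The first, in HAbstractScanner just below, recurs in
TableInfo, MetaRegion, WriteState, HScanner and RegionListener: inner classes that
never touch their enclosing instance are made static. Findbugs reports these as
SIC_INNER_SHOULD_BE_STATIC, because a non-static inner class carries a hidden
reference that keeps the whole outer object reachable. A minimal sketch with
hypothetical names, not code from the patch:

    // Editor's sketch: a non-static inner class holds an implicit Outer.this,
    // so every Holder would pin its Outer in memory. Declaring it static
    // drops that hidden reference.
    public class Outer {
      private static class Holder {   // no implicit Outer.this
        final int value;
        Holder(int value) { this.value = value; }
      }
    }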
diff --git a/src/java/org/apache/hadoop/hbase/HAbstractScanner.java b/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
index 50fae6ae65d..557e1310102 100644
--- a/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
+++ b/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
@@ -50,7 +50,7 @@ public abstract class HAbstractScanner implements HInternalScannerInterface {
   //     2. Match on the column family + column key regex
   //     3. Simple match: compare column family + column key literally
-  private class ColumnMatcher {
+  private static class ColumnMatcher {
     private boolean wildCardmatch;
     private MATCH_TYPE matchType;
     private String family;

diff --git a/src/java/org/apache/hadoop/hbase/HClient.java b/src/java/org/apache/hadoop/hbase/HClient.java
index f9e2b053147..e134a1af34b 100644
--- a/src/java/org/apache/hadoop/hbase/HClient.java
+++ b/src/java/org/apache/hadoop/hbase/HClient.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hbase;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.NoSuchElementException;
 import java.util.Random;
 import java.util.TreeMap;
@@ -50,7 +49,7 @@ public class HClient implements HConstants {
   private HMasterInterface master;
   private final Configuration conf;
 
-  private class TableInfo {
+  private static class TableInfo {
     public HRegionInfo regionInfo;
     public HServerAddress serverAddress;
 
@@ -133,7 +132,6 @@ public class HClient implements HConstants {
 
       try {
         Thread.sleep(this.clientTimeout);
-
       } catch(InterruptedException e) {
       }
 
@@ -179,9 +177,9 @@ public class HClient implements HConstants {
     TreeMap<Text, TableInfo> metaServers =
       this.tablesToServers.get(META_TABLE_NAME);
 
-    if(metaServers == null) {            // Don't know where the meta is
+    if (metaServers == null) {           // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -192,32 +190,19 @@ public class HClient implements HConstants {
 
     for(int tries = 0;
         this.tableServers.size() == 0 && tries < this.numRetries;
         tries++) {
-
-      Text firstMetaRegion = null;
-      if(metaServers.containsKey(tableName)) {
-        firstMetaRegion = tableName;
-
-      } else {
-        firstMetaRegion = metaServers.headMap(tableName).lastKey();
-      }
-      for(Iterator<TableInfo> i
-          = metaServers.tailMap(firstMetaRegion).values().iterator();
-          i.hasNext(); ) {
-
-        TableInfo t = i.next();
-
+      Text firstMetaRegion = (metaServers.containsKey(tableName))?
+        tableName: metaServers.headMap(tableName).lastKey();
+      for(TableInfo t: metaServers.tailMap(firstMetaRegion).values()) {
         scanOneMetaRegion(t, tableName);
       }
-      if(this.tableServers.size() == 0) {
+      if (this.tableServers.size() == 0) {
         // Table not assigned. Sleep and try again
-
-        if(LOG.isDebugEnabled()) {
+        if (LOG.isDebugEnabled()) {
          LOG.debug("Sleeping. Table " + tableName
            + " not currently being served.");
        }
        try {
          Thread.sleep(this.clientTimeout);
-
        } catch(InterruptedException e) {
        }
        if(LOG.isDebugEnabled()) {
@@ -225,7 +210,7 @@ public class HClient implements HConstants {
        }
      }
    }
-    if(this.tableServers.size() == 0) {
+    if (this.tableServers.size() == 0) {
      throw new IOException("failed to scan " + META_TABLE_NAME + " after "
        + this.numRetries + " retries");
    }
@@ -976,7 +961,9 @@ public class HClient implements HConstants {
           printUsage();
           break;
         }
-      } catch (Exception e) {
+      } catch (IOException e) {
+        e.printStackTrace();
+      } catch (RuntimeException e) {
         e.printStackTrace();
       }
 
@@ -988,4 +975,4 @@ public class HClient implements HConstants {
     int errCode = (new HClient(c)).doCommandLine(args);
     System.exit(errCode);
   }
-}
\ No newline at end of file
+}
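[Editor's note -- not part of the original patch] The @@ -192 hunk above collapses
the lookup of the first meta region into a ternary over TreeMap's navigation
methods: an exact hit on the table name, otherwise the last region whose start key
sorts before it. A self-contained sketch of that idiom, with hypothetical names and
simplified types:

    // Editor's sketch: regions keyed by start row in a TreeMap; the region
    // covering a row is an exact hit, or the last entry sorting before it.
    import java.util.TreeMap;

    public class RegionLookup {
      public static void main(String[] args) {
        TreeMap<String, String> regionsByStartRow = new TreeMap<String, String>();
        regionsByStartRow.put("", "region-1");   // first region: empty start key
        regionsByStartRow.put("m", "region-2");  // covers rows >= "m"

        String row = "k";
        String startRow = regionsByStartRow.containsKey(row)
            ? row
            : regionsByStartRow.headMap(row).lastKey();
        System.out.println(regionsByStartRow.get(startRow));  // prints region-1
      }
    }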
Table " + tableName + " not currently being served."); } try { Thread.sleep(this.clientTimeout); - } catch(InterruptedException e) { } if(LOG.isDebugEnabled()) { @@ -225,7 +210,7 @@ public class HClient implements HConstants { } } } - if(this.tableServers.size() == 0) { + if (this.tableServers.size() == 0) { throw new IOException("failed to scan " + META_TABLE_NAME + " after " + this.numRetries + " retries"); } @@ -976,7 +961,9 @@ public class HClient implements HConstants { printUsage(); break; } - } catch (Exception e) { + } catch (IOException e) { + e.printStackTrace(); + } catch (RuntimeException e) { e.printStackTrace(); } @@ -988,4 +975,4 @@ public class HClient implements HConstants { int errCode = (new HClient(c)).doCommandLine(args); System.exit(errCode); } -} \ No newline at end of file +} diff --git a/src/java/org/apache/hadoop/hbase/HLog.java b/src/java/org/apache/hadoop/hbase/HLog.java index 07de6916920..e0ab031fb44 100644 --- a/src/java/org/apache/hadoop/hbase/HLog.java +++ b/src/java/org/apache/hadoop/hbase/HLog.java @@ -72,14 +72,13 @@ public class HLog implements HConstants { boolean insideCacheFlush = false; TreeMap regionToLastFlush = new TreeMap(); - long oldestOutstandingSeqNum = -1; boolean closed = false; transient long logSeqNum = 0; long filenum = 0; transient int numEntries = 0; - Integer rollLock = new Integer(0); + Integer rollLock = 0; /** * Bundle up a bunch of log files (which are no longer being written to), @@ -319,10 +318,10 @@ public class HLog implements HConstants { } int counter = 0; - for (Text column: columns.keySet()) { + for (Map.Entry es: columns.entrySet()) { HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]); - HLogEdit logEdit = new HLogEdit(column, columns.get(column), timestamp); + HLogEdit logEdit = new HLogEdit(es.getKey(), es.getValue(), timestamp); writer.append(logKey, logEdit); numEntries++; } diff --git a/src/java/org/apache/hadoop/hbase/HLogKey.java b/src/java/org/apache/hadoop/hbase/HLogKey.java index ac4891a025b..5dd128ea235 100644 --- a/src/java/org/apache/hadoop/hbase/HLogKey.java +++ b/src/java/org/apache/hadoop/hbase/HLogKey.java @@ -72,6 +72,19 @@ public class HLogKey implements WritableComparable { return getTablename().toString() + " " + getRegionName().toString() + " " + getRow().toString() + " " + getLogSeqNum(); } + + @Override + public boolean equals(Object obj) { + return compareTo(obj) == 0; + } + + @Override + public int hashCode() { + int result = this.regionName.hashCode(); + result ^= this.row.hashCode(); + result ^= Long.valueOf(this.logSeqNum).hashCode(); + return result; + } ////////////////////////////////////////////////////////////////////////////// // Comparable diff --git a/src/java/org/apache/hadoop/hbase/HMaster.java b/src/java/org/apache/hadoop/hbase/HMaster.java index 3d0f139a61a..a8a80e947c4 100644 --- a/src/java/org/apache/hadoop/hbase/HMaster.java +++ b/src/java/org/apache/hadoop/hbase/HMaster.java @@ -294,9 +294,9 @@ public class HMaster implements HConstants, HMasterInterface, private RootScanner rootScanner; private Thread rootScannerThread; - private Integer rootScannerLock = new Integer(0); + private Integer rootScannerLock = 0; - private class MetaRegion { + private static class MetaRegion { public HServerAddress server; public Text regionName; public Text startKey; @@ -426,7 +426,7 @@ public class HMaster implements HConstants, HMasterInterface, private MetaScanner metaScanner; private Thread metaScannerThread; - private Integer metaScannerLock = new Integer(0); + 
diff --git a/src/java/org/apache/hadoop/hbase/HMemcache.java b/src/java/org/apache/hadoop/hbase/HMemcache.java
index ac9ad34f4ba..87616e25f2d 100644
--- a/src/java/org/apache/hadoop/hbase/HMemcache.java
+++ b/src/java/org/apache/hadoop/hbase/HMemcache.java
@@ -22,6 +22,7 @@ import org.apache.commons.logging.LogFactory;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /*******************************************************************************
  * The HMemcache holds in-memory modifications to the HRegion.  This is really a
@@ -38,9 +39,10 @@ public class HMemcache {
 
   TreeMap<HStoreKey, BytesWritable> snapshot = null;
 
-  HLocking lock = new HLocking();
+  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 
   public HMemcache() {
+    super();
   }
 
   public static class Snapshot {
@@ -48,6 +50,7 @@ public class HMemcache {
     public long sequenceId = 0;
 
     public Snapshot() {
+      super();
     }
   }
 
@@ -67,7 +70,7 @@ public class HMemcache {
   public Snapshot snapshotMemcacheForLog(HLog log) throws IOException {
     Snapshot retval = new Snapshot();
 
-    this.lock.obtainWriteLock();
+    this.lock.writeLock().lock();
     try {
       if(snapshot != null) {
         throw new IOException("Snapshot in progress!");
@@ -96,7 +99,7 @@ public class HMemcache {
       return retval;
 
     } finally {
-      this.lock.releaseWriteLock();
+      this.lock.writeLock().unlock();
     }
   }
 
@@ -106,7 +109,7 @@ public class HMemcache {
    * Modifying the structure means we need to obtain a writelock.
    */
   public void deleteSnapshot() throws IOException {
-    this.lock.obtainWriteLock();
+    this.lock.writeLock().lock();
 
     try {
       if(snapshot == null) {
@@ -132,7 +135,7 @@ public class HMemcache {
       }
 
     } finally {
-      this.lock.releaseWriteLock();
+      this.lock.writeLock().unlock();
     }
   }
 
@@ -142,18 +145,14 @@ public class HMemcache {
    * Operation uses a write lock.
    */
   public void add(Text row, TreeMap<Text, BytesWritable> columns, long timestamp) {
-    this.lock.obtainWriteLock();
+    this.lock.writeLock().lock();
     try {
-      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
-        Text column = it.next();
-        BytesWritable val = columns.get(column);
-
-        HStoreKey key = new HStoreKey(row, column, timestamp);
-        memcache.put(key, val);
+      for (Map.Entry<Text, BytesWritable> es: columns.entrySet()) {
+        HStoreKey key = new HStoreKey(row, es.getKey(), timestamp);
+        memcache.put(key, es.getValue());
       }
-
     } finally {
-      this.lock.releaseWriteLock();
+      this.lock.writeLock().unlock();
     }
   }
 
@@ -164,7 +163,7 @@ public class HMemcache {
    */
   public BytesWritable[] get(HStoreKey key, int numVersions) {
     Vector<BytesWritable> results = new Vector<BytesWritable>();
-    this.lock.obtainReadLock();
+    this.lock.readLock().lock();
     try {
       Vector<BytesWritable> result = get(memcache, key, numVersions-results.size());
       results.addAll(0, result);
@@ -178,15 +177,10 @@ public class HMemcache {
         results.addAll(results.size(), result);
       }
 
-      if(results.size() == 0) {
-        return null;
-
-      } else {
-        return results.toArray(new BytesWritable[results.size()]);
-      }
-
+      return (results.size() == 0)?
+        null: results.toArray(new BytesWritable[results.size()]);
     } finally {
-      this.lock.releaseReadLock();
+      this.lock.readLock().unlock();
     }
   }
 
@@ -196,9 +190,9 @@ public class HMemcache {
    *
    * The returned object should map column names to byte arrays (byte[]).
    */
-  public TreeMap<Text, BytesWritable> getFull(HStoreKey key) throws IOException {
+  public TreeMap<Text, BytesWritable> getFull(HStoreKey key) {
     TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
-    this.lock.obtainReadLock();
+    this.lock.readLock().lock();
     try {
       internalGetFull(memcache, key, results);
       for(int i = history.size()-1; i >= 0; i--) {
@@ -208,25 +202,22 @@ public class HMemcache {
       return results;
 
     } finally {
-      this.lock.releaseReadLock();
+      this.lock.readLock().unlock();
     }
   }
 
   void internalGetFull(TreeMap<HStoreKey, BytesWritable> map, HStoreKey key,
       TreeMap<Text, BytesWritable> results) {
-
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
-
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
-      HStoreKey itKey = it.next();
+    for (Map.Entry<HStoreKey, BytesWritable> es: tailMap.entrySet()) {
+      HStoreKey itKey = es.getKey();
       Text itCol = itKey.getColumn();
-
-      if(results.get(itCol) == null
+      if (results.get(itCol) == null
           && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
         results.put(itCol, val);
-
-      } else if(key.getRow().compareTo(itKey.getRow()) > 0) {
+      } else if (key.getRow().compareTo(itKey.getRow()) > 0) {
         break;
       }
     }
@@ -246,15 +237,14 @@ public class HMemcache {
     HStoreKey curKey = new HStoreKey(key.getRow(), key.getColumn(), key.getTimestamp());
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(curKey);
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
-      HStoreKey itKey = it.next();
-
-      if(itKey.matchesRowCol(curKey)) {
+    for (Map.Entry<HStoreKey, BytesWritable> es: tailMap.entrySet()) {
+      HStoreKey itKey = es.getKey();
+      if (itKey.matchesRowCol(curKey)) {
         result.add(tailMap.get(itKey));
         curKey.setVersion(itKey.getTimestamp() - 1);
       }
-      if(numVersions > 0 && result.size() >= numVersions) {
+      if (numVersions > 0 && result.size() >= numVersions) {
         break;
       }
     }
@@ -285,7 +275,7 @@ public class HMemcache {
 
       super(timestamp, targetCols);
 
-      lock.obtainReadLock();
+      lock.readLock().lock();
       try {
         this.backingMaps = new TreeMap[history.size() + 1];
@@ -377,7 +367,7 @@ public class HMemcache {
         }
 
       } finally {
-        lock.releaseReadLock();
+        lock.readLock().unlock();
         scannerClosed = true;
       }
     }
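[Editor's note -- not part of the original patch] The HMemcache diff above is the
heart of HADOOP-1423: the removed HLocking helper is replaced by
java.util.concurrent.locks.ReentrantReadWriteLock, with every lock()/unlock() pair
kept in try/finally so the lock is released on every exit path, including
exceptions. A sketch of the idiom on a hypothetical cache class:

    import java.util.TreeMap;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Editor's sketch: many readers may hold the read lock at once;
    // the write lock is exclusive.
    public class VersionedCache {
      private final TreeMap<String, byte[]> cache = new TreeMap<String, byte[]>();
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      public void put(String key, byte[] value) {
        lock.writeLock().lock();
        try {
          cache.put(key, value);
        } finally {
          lock.writeLock().unlock();   // always released, even on exceptions
        }
      }

      public byte[] get(String key) {
        lock.readLock().lock();
        try {
          return cache.get(key);
        } finally {
          lock.readLock().unlock();
        }
      }
    }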
diff --git a/src/java/org/apache/hadoop/hbase/HRegion.java b/src/java/org/apache/hadoop/hbase/HRegion.java
index c7ec0d22b04..b5d000a1973 100644
--- a/src/java/org/apache/hadoop/hbase/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/HRegion.java
@@ -171,12 +171,11 @@ public class HRegion implements HConstants {
       LOG.debug("merging stores");
     }
 
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
-      Text colFamily = it.next();
-      Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
+    for (Map.Entry<Text, Vector<HStoreFile>> es: filesToMerge.entrySet()) {
+      Text colFamily = es.getKey();
+      Vector<HStoreFile> srcFiles = es.getValue();
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
         colFamily, Math.abs(rand.nextLong()));
-
       dst.mergeStoreFiles(srcFiles, fs, conf);
       alreadyMerged.addAll(srcFiles);
     }
@@ -226,12 +225,11 @@ public class HRegion implements HConstants {
       LOG.debug("merging changes since start of merge");
     }
 
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
-      Text colFamily = it.next();
-      Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
-      HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
-        colFamily, Math.abs(rand.nextLong()));
-
+    for (Map.Entry<Text, Vector<HStoreFile>> es : filesToMerge.entrySet()) {
+      Text colFamily = es.getKey();
+      Vector<HStoreFile> srcFiles = es.getValue();
+      HStoreFile dst = new HStoreFile(conf, merges,
+        newRegionInfo.regionName, colFamily, Math.abs(rand.nextLong()));
       dst.mergeStoreFiles(srcFiles, fs, conf);
     }
 
@@ -268,7 +266,7 @@ public class HRegion implements HConstants {
   HRegionInfo regionInfo;
   Path regiondir;
 
-  class WriteState {
+  static class WriteState {
     public volatile boolean writesOngoing;
     public volatile boolean writesEnabled;
     public volatile boolean closed;
@@ -1248,7 +1246,7 @@ public class HRegion implements HConstants {
 /*******************************************************************************
  * HScanner is an iterator through a bunch of rows in an HRegion.
  ******************************************************************************/
-  private class HScanner implements HInternalScannerInterface {
+  private static class HScanner implements HInternalScannerInterface {
     private HInternalScannerInterface[] scanners;
     private TreeMap<Text, BytesWritable>[] resultSets;
     private HStoreKey[] keys;
diff --git a/src/java/org/apache/hadoop/hbase/HRegionServer.java b/src/java/org/apache/hadoop/hbase/HRegionServer.java
index 3946aeeb72a..5af373ef73d 100644
--- a/src/java/org/apache/hadoop/hbase/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/HRegionServer.java
@@ -64,7 +64,7 @@ public class HRegionServer
   private long splitOrCompactCheckFrequency;
   private SplitOrCompactChecker splitOrCompactChecker;
   private Thread splitOrCompactCheckerThread;
-  private Integer splitOrCompactLock = new Integer(0);
+  private Integer splitOrCompactLock = 0;
 
   private class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
     private HClient client = new HClient(conf);
@@ -222,7 +222,7 @@ public class HRegionServer
 
   private Flusher cacheFlusher;
   private Thread cacheFlusherThread;
-  private Integer cacheFlusherLock = new Integer(0);
+  private Integer cacheFlusherLock = 0;
   private class Flusher implements Runnable {
     public void run() {
       while(! stopRequested) {
@@ -291,7 +291,7 @@ public class HRegionServer
   private HLog log;
   private LogRoller logRoller;
   private Thread logRollerThread;
-  private Integer logRollerLock = new Integer(0);
+  private Integer logRollerLock = 0;
   private class LogRoller implements Runnable {
     public void run() {
       while(! stopRequested) {
@@ -388,7 +388,7 @@ public class HRegionServer
     try {
       // Server to handle client requests
-      this.server = RPC.getServer(this, address.getBindAddress().toString(),
+      this.server = RPC.getServer(this, address.getBindAddress(),
         address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
         false, conf);
 
@@ -509,10 +509,11 @@ public class HRegionServer
       if (LOG.isDebugEnabled()) {
         LOG.debug("Sleep");
       }
-      synchronized(this) {
+      synchronized (this) {
         try {
-          Thread.sleep(waitTime);
-        } catch(InterruptedException iex) {
+          wait(waitTime);
+        } catch (InterruptedException e1) {
+          // Go back up to the while test if stop has been requested.
         }
       }
     }
@@ -588,10 +589,11 @@ public class HRegionServer
       if (LOG.isDebugEnabled()) {
         LOG.debug("Sleep");
       }
-      synchronized(this) {
+      synchronized (this) {
         try {
-          Thread.sleep(waitTime);
+          wait(waitTime);
         } catch(InterruptedException iex) {
+          // On interrupt we go around to the while test of stopRequested
         }
       }
       if (LOG.isDebugEnabled()) {
@@ -927,10 +929,8 @@ public class HRegionServer
     TreeMap<Text, BytesWritable> map = region.getFull(row);
     LabelledData result[] = new LabelledData[map.size()];
     int counter = 0;
-    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext(); ) {
-      Text colname = it.next();
-      BytesWritable val = map.get(colname);
-      result[counter++] = new LabelledData(colname, val);
+    for (Map.Entry<Text, BytesWritable> es: map.entrySet()) {
+      result[counter++] = new LabelledData(es.getKey(), es.getValue());
     }
     return result;
   }
@@ -939,7 +939,7 @@ public class HRegionServer
    * Start an update to the HBase.  This also creates a lease associated with
    * the caller.
    */
-  private class RegionListener extends LeaseListener {
+  private static class RegionListener extends LeaseListener {
     private HRegion localRegion;
     private long localLockId;
@@ -1180,4 +1180,4 @@ public class HRegionServer
       printUsageAndExit();
     }
   }
-}
\ No newline at end of file
+}
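[Editor's note -- not part of the original patch] The @@ -509 and @@ -588 hunks
above swap Thread.sleep for Object.wait inside the synchronized block. Findbugs
flags sleeping with a monitor held (SWL_SLEEP_WITH_LOCK_HELD): sleep keeps the lock
for the whole pause, while wait releases it and can be cut short by a notify, for
example at shutdown. A sketch of the pattern on a hypothetical worker:

    // Editor's sketch: wait() releases the monitor while pausing, so stop()
    // can take the lock and wake the worker early; Thread.sleep() would hold
    // the lock for the full interval.
    public class StoppableWorker implements Runnable {
      private volatile boolean stopRequested = false;

      public void run() {
        while (!stopRequested) {
          doWork();
          synchronized (this) {
            try {
              wait(10 * 1000);              // releases this object's monitor
            } catch (InterruptedException e) {
              // Fall through and re-test stopRequested.
            }
          }
        }
      }

      public synchronized void stop() {
        stopRequested = true;
        notifyAll();                        // wake the worker immediately
      }

      private void doWork() { /* ... */ }
    }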
diff --git a/src/java/org/apache/hadoop/hbase/HRegiondirReader.java b/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
index a006207e11b..9844e072cfd 100644
--- a/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
+++ b/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
@@ -19,6 +19,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -181,12 +182,14 @@ public class HRegiondirReader {
     // Every line starts with row name followed by column name
     // followed by cell content.
     while(scanner.next(key, results)) {
-      for (Text colname: results.keySet()) {
+      for (Map.Entry<Text, BytesWritable> es: results.entrySet()) {
+        Text colname = es.getKey();
+        BytesWritable colvalue = es.getValue();
         Object value = null;
-        byte[] bytes = new byte[results.get(colname).getSize()];
+        byte[] bytes = new byte[colvalue.getSize()];
         if (colname.toString().equals("info:regioninfo")) {
           // Then bytes are instance of an HRegionInfo.
-          System.arraycopy(results.get(colname).get(), 0, bytes, 0, bytes.length);
+          System.arraycopy(colvalue.get(), 0, bytes, 0, bytes.length);
           value = new HRegionInfo(bytes);
         } else {
           value = new String(bytes, HConstants.UTF8_ENCODING);
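[Editor's note -- not part of the original patch] The added arraycopy line above is
rendered as System.arraycopy(colvalue.get(), ...) rather than the garbled
System.arraycopy(colvalue, ...): arraycopy needs the backing byte[], not the
BytesWritable wrapper, which would fail at runtime. The hunk also shows the most
common rewrite in this patch: iterating entrySet() instead of keySet() plus a get()
per key, which Findbugs reports as WMI_WRONG_MAP_ITERATOR. A sketch on a
hypothetical map:

    import java.util.Map;
    import java.util.TreeMap;

    // Editor's sketch: one entrySet() traversal instead of keySet() + get().
    public class EntrySetDemo {
      public static void main(String[] args) {
        Map<String, byte[]> columns = new TreeMap<String, byte[]>();
        columns.put("info:server", new byte[] { 1 });

        // Flagged by Findbugs: an extra map lookup on every iteration.
        for (String column : columns.keySet()) {
          byte[] value = columns.get(column);
          System.out.println(column + " -> " + value.length + " bytes");
        }

        // Preferred: the entry already holds both key and value.
        for (Map.Entry<String, byte[]> entry : columns.entrySet()) {
          System.out.println(entry.getKey() + " -> "
              + entry.getValue().length + " bytes");
        }
      }
    }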
diff --git a/src/java/org/apache/hadoop/hbase/HServerAddress.java b/src/java/org/apache/hadoop/hbase/HServerAddress.java
index 028b91b0614..9514975bd22 100644
--- a/src/java/org/apache/hadoop/hbase/HServerAddress.java
+++ b/src/java/org/apache/hadoop/hbase/HServerAddress.java
@@ -35,8 +35,8 @@ public class HServerAddress implements Writable {
 
   public HServerAddress(InetSocketAddress address) {
     this.address = address;
-    this.stringValue = new String(address.getAddress().getHostAddress()
-      + ":" + address.getPort());
+    this.stringValue = address.getAddress().getHostAddress() + ":" +
+      address.getPort();
   }
 
   public HServerAddress(String hostAndPort) {
@@ -47,19 +47,19 @@ public class HServerAddress implements Writable {
     String host = hostAndPort.substring(0, colonIndex);
     int port = Integer.valueOf(hostAndPort.substring(colonIndex + 1));
     this.address = new InetSocketAddress(host, port);
-    this.stringValue = new String(hostAndPort);
+    this.stringValue = hostAndPort;
   }
 
   public HServerAddress(String bindAddress, int port) {
     this.address = new InetSocketAddress(bindAddress, port);
-    this.stringValue = new String(bindAddress + ":" + port);
+    this.stringValue = bindAddress + ":" + port;
   }
 
   public HServerAddress(HServerAddress other) {
     String bindAddress = other.getBindAddress();
     int port = other.getPort();
     address = new InetSocketAddress(bindAddress, port);
-    stringValue = new String(bindAddress + ":" + port);
+    stringValue = bindAddress + ":" + port;
   }
 
   public String getBindAddress() {

diff --git a/src/java/org/apache/hadoop/hbase/HStore.java b/src/java/org/apache/hadoop/hbase/HStore.java
index 565a997585c..aa3b64d6cc6 100644
--- a/src/java/org/apache/hadoop/hbase/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/HStore.java
@@ -61,8 +61,8 @@ public class HStore {
   Path compactdir;
   Path loginfodir;
 
-  Integer compactLock = new Integer(0);
-  Integer flushLock = new Integer(0);
+  Integer compactLock = 0;
+  Integer flushLock = 0;
 
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 
@@ -225,13 +225,10 @@ public class HStore {
     if(LOG.isDebugEnabled()) {
       LOG.debug("starting map readers");
     }
-    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext(); ) {
-      Long key = it.next().longValue();
-      HStoreFile hsf = mapFiles.get(key);
-
+    for(Map.Entry<Long, HStoreFile> e: mapFiles.entrySet()) {
       // TODO - is this really necessary?  Don't I do this inside compact()?
-      maps.put(key,
-        new MapFile.Reader(fs, hsf.getMapFilePath().toString(), conf));
+      maps.put(e.getKey(),
+        new MapFile.Reader(fs, e.getValue().getMapFilePath().toString(), conf));
     }
 
     LOG.info("HStore online for " + this.regionName + "/" + this.colFamily);
@@ -239,19 +236,16 @@ public class HStore {
 
   /** Turn off all the MapFile readers */
   public void close() throws IOException {
-    this.lock.writeLock().lock();
     LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
-
+    this.lock.writeLock().lock();
     try {
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
-        MapFile.Reader map = it.next();
+      for (MapFile.Reader map: maps.values()) {
         map.close();
       }
       maps.clear();
       mapFiles.clear();
 
       LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
-
     } finally {
       this.lock.writeLock().unlock();
     }
@@ -300,10 +294,10 @@ public class HStore {
         HStoreKey.class, BytesWritable.class);
 
     try {
-      for (HStoreKey curkey: inputCache.keySet()) {
-        if(this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
-          BytesWritable val = inputCache.get(curkey);
-          out.append(curkey, val);
+      for (Map.Entry<HStoreKey, BytesWritable> es: inputCache.entrySet()) {
+        HStoreKey curkey = es.getKey();
+        if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          out.append(curkey, es.getValue());
         }
       }
       if(LOG.isDebugEnabled()) {
@@ -631,8 +625,9 @@ public class HStore {
 
     // 1. Acquiring the write-lock
 
-    this.lock.writeLock().lock();
+    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+    this.lock.writeLock().lock();
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
       if(! fs.exists(doneFile)) {
@@ -918,10 +913,10 @@ public class HStore {
 
   /** Generate a random unique filename suffix */
   String obtainFileLabel(Path prefix) throws IOException {
-    String testsuffix = String.valueOf(Math.abs(rand.nextInt()));
+    String testsuffix = String.valueOf(rand.nextInt(Integer.MAX_VALUE));
     Path testpath = new Path(prefix.toString() + testsuffix);
     while(fs.exists(testpath)) {
-      testsuffix = String.valueOf(Math.abs(rand.nextInt()));
+      testsuffix = String.valueOf(rand.nextInt(Integer.MAX_VALUE));
       testpath = new Path(prefix.toString() + testsuffix);
     }
     return testsuffix;
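[Editor's note -- not part of the original patch] The obtainFileLabel hunk above is
the fix for Findbugs' RV_ABSOLUTE_VALUE_OF_RANDOM_INT: Math.abs(rand.nextInt()) is
still negative when nextInt() happens to return Integer.MIN_VALUE, because two's
complement has no positive counterpart for that value; rand.nextInt(Integer.MAX_VALUE)
is non-negative by construction. A small demonstration (editor's sketch):

    import java.util.Random;

    // Editor's sketch: why Math.abs(rand.nextInt()) is not always non-negative.
    public class AbsRandomDemo {
      public static void main(String[] args) {
        // Math.abs returns Integer.MIN_VALUE unchanged -- still negative.
        System.out.println(Math.abs(Integer.MIN_VALUE));      // -2147483648

        // The bounded form can never produce a negative suffix.
        Random rand = new Random();
        System.out.println(rand.nextInt(Integer.MAX_VALUE));  // in [0, 2^31-2]
      }
    }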
diff --git a/src/java/org/apache/hadoop/hbase/HStoreFile.java b/src/java/org/apache/hadoop/hbase/HStoreFile.java
index 881769219f5..bab3963c591 100644
--- a/src/java/org/apache/hadoop/hbase/HStoreFile.java
+++ b/src/java/org/apache/hadoop/hbase/HStoreFile.java
@@ -326,6 +326,19 @@ public class HStoreFile implements HConstants, WritableComparable {
       out.close();
     }
   }
+
+  public boolean equals(Object o) {
+    return this.compareTo(o) == 0;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = this.dir.hashCode();
+    result ^= this.regionName.hashCode();
+    result ^= this.colFamily.hashCode();
+    result ^= Long.valueOf(this.fileId).hashCode();
+    return result;
+  }
 
   //////////////////////////////////////////////////////////////////////////////
   // Writable
@@ -368,11 +381,4 @@ public class HStoreFile implements HConstants, WritableComparable {
     }
     return result;
   }
-
-
-  public boolean equals(Object o) {
-    return this.compareTo(o) == 0;
-  }
-}
-
-
+}
\ No newline at end of file

diff --git a/src/java/org/apache/hadoop/hbase/HStoreKey.java b/src/java/org/apache/hadoop/hbase/HStoreKey.java
index 27f24f465f4..5edc611bc19 100644
--- a/src/java/org/apache/hadoop/hbase/HStoreKey.java
+++ b/src/java/org/apache/hadoop/hbase/HStoreKey.java
@@ -15,6 +15,8 @@
  */
 package org.apache.hadoop.hbase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
@@ -23,6 +25,8 @@ import java.io.*;
  * A Key for a stored row
 ******************************************************************************/
 public class HStoreKey implements WritableComparable {
+  private final Log LOG = LogFactory.getLog(this.getClass().getName());
+
   public static Text extractFamily(Text col) throws IOException {
     String column = col.toString();
     int colpos = column.indexOf(":");
@@ -128,6 +132,7 @@ public class HStoreKey implements WritableComparable {
           extractFamily(other.getColumn())) == 0;
 
     } catch(IOException e) {
+      LOG.error(e);
     }
     return status;
   }
@@ -135,6 +140,19 @@ public class HStoreKey implements WritableComparable {
   public String toString() {
     return row.toString() + "/" + column.toString() + "/" + timestamp;
   }
+
+  @Override
+  public boolean equals(Object obj) {
+    return compareTo(obj) == 0;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = this.row.hashCode();
+    result ^= this.column.hashCode();
+    result ^= Long.valueOf(this.timestamp).hashCode();
+    return result;
+  }
 
   //////////////////////////////////////////////////////////////////////////////
   // Comparable
diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
index ea671c55157..23078e06c61 100644
--- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -124,6 +124,24 @@ public class HTableDescriptor implements WritableComparable {
       ", maxVersions: " + this.maxVersions + ", families: " + this.families;
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    return compareTo(obj) == 0;
+  }
+
+  @Override
+  public int hashCode() {
+    // TODO: Cache.
+    int result = this.name.hashCode();
+    result ^= Integer.valueOf(this.maxVersions).hashCode();
+    if (this.families != null && this.families.size() > 0) {
+      for (Text family: this.families) {
+        result ^= family.hashCode();
+      }
+    }
+    return result;
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // Writable
   //////////////////////////////////////////////////////////////////////////////

diff --git a/src/java/org/apache/hadoop/hbase/Leases.java b/src/java/org/apache/hadoop/hbase/Leases.java
index 97cd23da08d..7a6ebc4a56c 100644
--- a/src/java/org/apache/hadoop/hbase/Leases.java
+++ b/src/java/org/apache/hadoop/hbase/Leases.java
@@ -71,6 +71,7 @@ public class Leases {
       this.leaseMonitorThread.interrupt();
       this.leaseMonitorThread.join();
     } catch (InterruptedException iex) {
+      // Ignore
     }
     synchronized(leases) {
       synchronized(sortedLeases) {
@@ -166,6 +167,7 @@ public class Leases {
         try {
           Thread.sleep(leaseCheckFrequency);
         } catch (InterruptedException ie) {
+          // Ignore
         }
       }
     }
@@ -211,6 +213,18 @@ public class Leases {
       listener.leaseExpired();
     }
 
+    @Override
+    public boolean equals(Object obj) {
+      return compareTo(obj) == 0;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = this.getLeaseId().hashCode();
+      result ^= Long.valueOf(this.lastUpdate).hashCode();
+      return result;
+    }
+
     //////////////////////////////////////////////////////////////////////////////
     // Comparable
     //////////////////////////////////////////////////////////////////////////////
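[Editor's note -- not part of the original patch] The other change repeated across
HLog, HMaster, HRegionServer and HStore is `new Integer(0)` becoming the autoboxed
literal `0` for the various lock fields, which satisfies Findbugs' DM_NUMBER_CTOR.
One property of the autoboxed form worth knowing where these fields serve as
monitors: small boxed Integers come from the shared Integer.valueOf cache, so all
such fields refer to one JVM-wide object, whereas a plain new Object() gives each
field its own private monitor. Editor's sketch, not part of the patch:

    // Editor's sketch: autoboxed small Integers are cached and shared JVM-wide.
    public class BoxedLockDemo {
      public static void main(String[] args) {
        Integer a = 0;                      // Integer.valueOf(0) -- cached
        Integer b = 0;
        System.out.println(a == b);         // true: same object, shared monitor

        Object lockA = new Object();        // distinct, private monitors
        Object lockB = new Object();
        System.out.println(lockA == lockB); // false
      }
    }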