From 47bf7fd8bbaca22a89e7429e52dcac5a5126d38a Mon Sep 17 00:00:00 2001 From: Jim Kellerman Date: Tue, 22 May 2007 05:30:07 +0000 Subject: [PATCH] HADOOP-1397. Replace custom hbase locking with java.util.concurrent.locks.ReentrantLock git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@540424 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES.txt | 2 + src/java/org/apache/hadoop/hbase/HRegion.java | 42 ++++++------- .../apache/hadoop/hbase/HRegionServer.java | 63 ++++++++++--------- src/java/org/apache/hadoop/hbase/HStore.java | 43 ++++++------- 4 files changed, 76 insertions(+), 74 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 9850192c859..782c713881b 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -6,3 +6,5 @@ Trunk (unreleased changes) 1. HADOOP-1384. HBase omnibus patch. (jimk, Vuk Ercegovac, and Michael Stack) 2. HADOOP-1402. Fix javadoc warnings in hbase contrib. (Michael Stack) 3. HADOOP-1404. HBase command-line shutdown failing (Michael Stack) + 4. HADOOP-1397. Replace custom hbase locking with + java.util.concurrent.locks.ReentrantLock (Michael Stack) diff --git a/src/java/org/apache/hadoop/hbase/HRegion.java b/src/java/org/apache/hadoop/hbase/HRegion.java index 148a2b54d84..c7ec0d22b04 100644 --- a/src/java/org/apache/hadoop/hbase/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/HRegion.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.*; import java.io.*; import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; /** * HRegion stores data for a certain region of a table. 
It stores all columns @@ -284,7 +285,7 @@ public class HRegion implements HConstants { int maxUnflushedEntries = 0; int compactionThreshold = 0; - HLocking lock = null; + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); ////////////////////////////////////////////////////////////////////////////// // Constructor @@ -322,8 +323,6 @@ public class HRegion implements HConstants { this.writestate.writesOngoing = true; this.writestate.writesEnabled = true; this.writestate.closed = false; - - this.lock = new HLocking(); // Declare the regionName. This is a unique string for the region, used to // build a unique filename. @@ -401,7 +400,7 @@ public class HRegion implements HConstants { * time-sensitive thread. */ public Vector close() throws IOException { - lock.obtainWriteLock(); + lock.writeLock().lock(); try { boolean shouldClose = false; synchronized(writestate) { @@ -441,7 +440,7 @@ public class HRegion implements HConstants { } } } finally { - lock.releaseWriteLock(); + lock.writeLock().unlock(); } } @@ -617,7 +616,7 @@ public class HRegion implements HConstants { * @return - true if the region should be split */ public boolean needsSplit(Text midKey) { - lock.obtainReadLock(); + lock.readLock().lock(); try { Text key = new Text(); @@ -635,7 +634,7 @@ public class HRegion implements HConstants { return (maxSize > (DESIRED_MAX_FILE_SIZE + (DESIRED_MAX_FILE_SIZE / 2))); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } } @@ -644,7 +643,7 @@ public class HRegion implements HConstants { */ public boolean needsCompaction() { boolean needsCompaction = false; - lock.obtainReadLock(); + lock.readLock().lock(); try { for(Iterator i = stores.values().iterator(); i.hasNext(); ) { if(i.next().getNMaps() > compactionThreshold) { @@ -653,7 +652,7 @@ public class HRegion implements HConstants { } } } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } return needsCompaction; } @@ -673,7 +672,7 @@ public class HRegion 
implements HConstants { */ public boolean compactStores() throws IOException { boolean shouldCompact = false; - lock.obtainReadLock(); + lock.readLock().lock(); try { synchronized(writestate) { if((! writestate.writesOngoing) @@ -686,7 +685,7 @@ public class HRegion implements HConstants { } } } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } if(! shouldCompact) { @@ -694,7 +693,7 @@ public class HRegion implements HConstants { return false; } else { - lock.obtainWriteLock(); + lock.writeLock().lock(); try { LOG.info("starting compaction on region " + this.regionInfo.regionName); for(Iterator it = stores.values().iterator(); it.hasNext(); ) { @@ -710,7 +709,7 @@ public class HRegion implements HConstants { recentCommits = 0; writestate.notifyAll(); } - lock.releaseWriteLock(); + lock.writeLock().unlock(); } } } @@ -931,7 +930,7 @@ public class HRegion implements HConstants { private BytesWritable[] get(HStoreKey key, int numVersions) throws IOException { - lock.obtainReadLock(); + lock.readLock().lock(); try { // Check the memcache @@ -951,7 +950,7 @@ public class HRegion implements HConstants { return targetStore.get(key, numVersions); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } } @@ -968,7 +967,7 @@ public class HRegion implements HConstants { public TreeMap getFull(Text row) throws IOException { HStoreKey key = new HStoreKey(row, System.currentTimeMillis()); - lock.obtainReadLock(); + lock.readLock().lock(); try { TreeMap memResult = memcache.getFull(key); for(Iterator it = stores.keySet().iterator(); it.hasNext(); ) { @@ -979,7 +978,7 @@ public class HRegion implements HConstants { return memResult; } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } } @@ -988,7 +987,7 @@ public class HRegion implements HConstants { * columns. This Iterator must be closed by the caller. 
 */ public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws IOException { - lock.obtainReadLock(); + lock.readLock().lock(); try { TreeSet families = new TreeSet(); for(int i = 0; i < cols.length; i++) { @@ -1004,7 +1003,7 @@ public class HRegion implements HConstants { return new HScanner(cols, firstRow, memcache, storelist); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } } @@ -1027,12 +1026,12 @@ public class HRegion implements HConstants { // We obtain a per-row lock, so other clients will // block while one client performs an update. - lock.obtainReadLock(); + lock.readLock().lock(); try { return obtainLock(row); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } } diff --git a/src/java/org/apache/hadoop/hbase/HRegionServer.java b/src/java/org/apache/hadoop/hbase/HRegionServer.java index 6f6c545e804..b0b2af8f9db 100644 --- a/src/java/org/apache/hadoop/hbase/HRegionServer.java +++ b/src/java/org/apache/hadoop/hbase/HRegionServer.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.*; import java.io.*; import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; /******************************************************************************* * HRegionServer makes a set of HRegions available to clients. 
It checks in with @@ -50,7 +51,7 @@ public class HRegionServer private Configuration conf; private Random rand; private TreeMap regions; // region name -> HRegion - private HLocking lock; + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private Vector outboundMsgs; private long threadWakeFrequency; @@ -71,9 +72,12 @@ public class HRegionServer * @see org.apache.hadoop.hbase.RegionUnavailableListener#regionIsUnavailable(org.apache.hadoop.io.Text) */ public void regionIsUnavailable(Text regionName) { - lock.obtainWriteLock(); - regions.remove(regionName); - lock.releaseWriteLock(); + lock.writeLock().lock(); + try { + regions.remove(regionName); + } finally { + lock.writeLock().unlock(); + } } /* (non-Javadoc) @@ -88,11 +92,11 @@ public class HRegionServer // Grab a list of regions to check Vector regionsToCheck = new Vector(); - lock.obtainReadLock(); + lock.readLock().lock(); try { regionsToCheck.addAll(regions.values()); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } try { @@ -163,10 +167,13 @@ public class HRegionServer // Finally, start serving the new regions - lock.obtainWriteLock(); - regions.put(newRegions[0].getRegionName(), newRegions[0]); - regions.put(newRegions[1].getRegionName(), newRegions[1]); - lock.releaseWriteLock(); + lock.writeLock().lock(); + try { + regions.put(newRegions[0].getRegionName(), newRegions[0]); + regions.put(newRegions[1].getRegionName(), newRegions[1]); + } finally { + lock.writeLock().unlock(); + } } } } @@ -214,12 +221,12 @@ public class HRegionServer // Grab a list of items to flush Vector toFlush = new Vector(); - lock.obtainReadLock(); + lock.readLock().lock(); try { toFlush.addAll(regions.values()); } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); } // Flush them, if necessary @@ -340,7 +346,6 @@ public class HRegionServer this.conf = conf; this.rand = new Random(); this.regions = new TreeMap(); - this.lock = new HLocking(); this.outboundMsgs = new Vector(); 
this.scanners = Collections.synchronizedMap(new TreeMap()); @@ -752,27 +757,26 @@ public class HRegionServer } private void openRegion(HRegionInfo regionInfo) throws IOException { - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); try { HRegion region = new HRegion(regionDir, log, fs, conf, regionInfo, null, oldlogfile); regions.put(region.getRegionName(), region); - reportOpen(region); - + reportOpen(region); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } } private void closeRegion(HRegionInfo info, boolean reportWhenCompleted) throws IOException { - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); HRegion region = null; try { region = regions.remove(info.regionName); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } if(region != null) { @@ -785,13 +789,13 @@ public class HRegionServer } private void closeAndDeleteRegion(HRegionInfo info) throws IOException { - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); HRegion region = null; try { region = regions.remove(info.regionName); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } if(region != null) { if(LOG.isDebugEnabled()) { @@ -809,13 +813,13 @@ public class HRegionServer /** Called either when the master tells us to restart or from stop() */ private void closeAllRegions() { Vector regionsToClose = new Vector(); - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); try { regionsToClose.addAll(regions.values()); regions.clear(); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } for(Iterator it = regionsToClose.iterator(); it.hasNext(); ) { HRegion region = it.next(); @@ -842,7 +846,7 @@ public class HRegionServer ****************************************************************************/ /* private void mergeRegions(Text regionNameA, Text regionNameB) throws IOException { - locking.obtainWriteLock(); + locking.writeLock().lock(); try { HRegion 
srcA = regions.remove(regionNameA); HRegion srcB = regions.remove(regionNameB); @@ -854,7 +856,7 @@ public class HRegionServer reportOpen(newRegion); } finally { - locking.releaseWriteLock(); + locking.writeLock().unlock(); } } */ @@ -1016,13 +1018,13 @@ public class HRegionServer /** Private utility method for safely obtaining an HRegion handle. */ private HRegion getRegion(Text regionName) throws NotServingRegionException { - this.lock.obtainReadLock(); + this.lock.readLock().lock(); HRegion region = null; try { region = regions.get(regionName); } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } if(region == null) { diff --git a/src/java/org/apache/hadoop/hbase/HStore.java b/src/java/org/apache/hadoop/hbase/HStore.java index e7c7dfd16f8..565a997585c 100644 --- a/src/java/org/apache/hadoop/hbase/HStore.java +++ b/src/java/org/apache/hadoop/hbase/HStore.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.Random; import java.util.TreeMap; import java.util.Vector; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -63,7 +64,7 @@ public class HStore { Integer compactLock = new Integer(0); Integer flushLock = new Integer(0); - HLocking lock = new HLocking(); + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); TreeMap maps = new TreeMap(); TreeMap mapFiles = new TreeMap(); @@ -238,7 +239,7 @@ public class HStore { /** Turn off all the MapFile readers */ public void close() throws IOException { - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily); try { @@ -252,7 +253,7 @@ public class HStore { LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } } @@ -324,7 +325,7 @@ public class HStore { // C. Finally, make the new MapFile available. 
if(addToAvailableMaps) { - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); try { maps.put(logCacheFlushId, new MapFile.Reader(fs, mapfile.toString(), conf)); @@ -335,7 +336,7 @@ public class HStore { } } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } } return getAllMapFiles(); @@ -343,12 +344,12 @@ public class HStore { } public Vector getAllMapFiles() { - this.lock.obtainReadLock(); + this.lock.readLock().lock(); try { return new Vector(mapFiles.values()); } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } } @@ -390,12 +391,12 @@ public class HStore { // Grab a list of files to compact. Vector toCompactFiles = null; - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); try { toCompactFiles = new Vector(mapFiles.values()); } finally { - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps @@ -630,7 +631,7 @@ public class HStore { // 1. Acquiring the write-lock - this.lock.obtainWriteLock(); + this.lock.writeLock().lock(); Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily); try { Path doneFile = new Path(curCompactStore, COMPACTION_DONE); @@ -748,7 +749,7 @@ public class HStore { // 7. Releasing the write-lock - this.lock.releaseWriteLock(); + this.lock.writeLock().unlock(); } } @@ -764,7 +765,7 @@ public class HStore { * The returned object should map column names to byte arrays (byte[]). 
*/ public void getFull(HStoreKey key, TreeMap results) throws IOException { - this.lock.obtainReadLock(); + this.lock.readLock().lock(); try { MapFile.Reader[] maparray = maps.values().toArray(new MapFile.Reader[maps.size()]); @@ -793,7 +794,7 @@ public class HStore { } } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } } @@ -809,7 +810,7 @@ public class HStore { } Vector results = new Vector(); - this.lock.obtainReadLock(); + this.lock.readLock().lock(); try { MapFile.Reader[] maparray = maps.values().toArray(new MapFile.Reader[maps.size()]); @@ -850,7 +851,7 @@ public class HStore { } } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } } @@ -866,7 +867,7 @@ public class HStore { return maxSize; } - this.lock.obtainReadLock(); + this.lock.readLock().lock(); try { long mapIndex = 0L; @@ -893,7 +894,7 @@ public class HStore { LOG.warn(e); } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } return maxSize; } @@ -902,12 +903,12 @@ public class HStore { * @return Returns the number of map files currently in use */ public int getNMaps() { - this.lock.obtainReadLock(); + this.lock.readLock().lock(); try { return maps.size(); } finally { - this.lock.releaseReadLock(); + this.lock.readLock().unlock(); } } @@ -949,7 +950,7 @@ public class HStore { super(timestamp, targetCols); - lock.obtainReadLock(); + lock.readLock().lock(); try { this.readers = new MapFile.Reader[mapFiles.size()]; @@ -1064,7 +1065,7 @@ public class HStore { } } finally { - lock.releaseReadLock(); + lock.readLock().unlock(); scannerClosed = true; } }