diff --git a/CHANGES.txt b/CHANGES.txt
index 1346ad9df00..75e11c550ac 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -22,6 +22,9 @@ Hbase Change Log
    HBASE-446 Fully qualified hbase.rootdir doesn't work
    HBASE-438 XMLOutputter state should be initialized. (Edward Yoon via Stack)
    HBASE-8   Delete table does not remove the table directory in the FS
+   HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
+             that reach the client even after retries
+
  IMPROVEMENTS
    HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
 
diff --git a/src/java/org/apache/hadoop/hbase/HRegion.java b/src/java/org/apache/hadoop/hbase/HRegion.java
index 539925c85dc..88ef1cc925b 100644
--- a/src/java/org/apache/hadoop/hbase/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/HRegion.java
@@ -536,7 +536,7 @@ public class HRegion implements HConstants {
   HStore.HStoreSize largestHStore(Text midkey) {
     HStore.HStoreSize biggest = null;
     boolean splitable = true;
-    for(HStore h: stores.values()) {
+    for (HStore h: stores.values()) {
       HStore.HStoreSize size = h.size(midkey);
       // If we came across a reference down in the store, then propagate
       // fact that region is not splitable.
@@ -577,14 +577,25 @@ public class HRegion implements HConstants {
     if(!this.fs.exists(splits)) {
       this.fs.mkdirs(splits);
     }
+    // Make copies just in case and add start/end key checking: hbase-428.
+    Text startKey = new Text(this.regionInfo.getStartKey());
+    Text endKey = new Text(this.regionInfo.getEndKey());
+    if (startKey.equals(midKey)) {
+      LOG.debug("Startkey and midkey are same, not splitting");
+      return null;
+    }
+    if (midKey.equals(endKey)) {
+      LOG.debug("Endkey and midkey are same, not splitting");
+      return null;
+    }
     HRegionInfo regionAInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      this.regionInfo.getStartKey(), midKey);
+      startKey, midKey);
     Path dirA = new Path(splits, regionAInfo.getEncodedName());
     if(fs.exists(dirA)) {
       throw new IOException("Cannot split; target file collision at " + dirA);
     }
     HRegionInfo regionBInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      midKey, this.regionInfo.getEndKey());
+      midKey, endKey);
     Path dirB = new Path(splits, regionBInfo.getEncodedName());
     if(this.fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirB);
diff --git a/src/java/org/apache/hadoop/hbase/HRegionServer.java b/src/java/org/apache/hadoop/hbase/HRegionServer.java
index 10507ccc938..c94ad529b01 100644
--- a/src/java/org/apache/hadoop/hbase/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/HRegionServer.java
@@ -334,11 +334,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           update.put(COL_REGIONINFO, Writables.getBytes(
             newRegions[i].getRegionInfo()));
           t.commit(update);
-
-/*        long lockid = t.startUpdate(newRegions[i].getRegionName());
-          t.put(lockid, COL_REGIONINFO, Writables.getBytes(
-            newRegions[i].getRegionInfo()));
-          t.commit(lockid);*/
         }
 
         // Now tell the master about the new regions
@@ -348,9 +343,9 @@
         reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
           newRegions[1].getRegionInfo());
         LOG.info("region split, META updated, and report to master all" +
-          " successful. Old region=" + oldRegionInfo.getRegionName() +
-          ", new regions: " + newRegions[0].getRegionName() + ", " +
-          newRegions[1].getRegionName() + ". Split took " +
+          " successful. Old region=" + oldRegionInfo.toString() +
+          ", new regions: " + newRegions[0].toString() + ", " +
+          newRegions[1].toString() + ". Split took " +
           StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
 
         // Do not serve the new regions. Let the Master assign them.
diff --git a/src/java/org/apache/hadoop/hbase/HStore.java b/src/java/org/apache/hadoop/hbase/HStore.java
index b768eec456e..f76d1e1f473 100644
--- a/src/java/org/apache/hadoop/hbase/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/HStore.java
@@ -243,12 +243,11 @@ public class HStore implements HConstants {
       if ( (key_memcache != null && key_memcache.equals(row)) ||
           (key_snapshot != null && key_snapshot.equals(row)) ) {
         return row;
-      } else {
-        // no precise matches, so return the one that is closer to the search
-        // key (greatest)
-        return key_memcache.compareTo(key_snapshot) > 0 ?
-          key_memcache : key_snapshot;
-      }
+      }
+      // no precise matches, so return the one that is closer to the search
+      // key (greatest)
+      return key_memcache.compareTo(key_snapshot) > 0 ?
+        key_memcache : key_snapshot;
     }
   } finally {
     this.lock.readLock().unlock();
@@ -293,13 +292,7 @@ public class HStore implements HConstants {
     // the tail didn't contain the key we're searching for, so we should
     // use the last key in the headmap as the closest before
     SortedMap headMap = map.headMap(search_key);
-    if (headMap.isEmpty()) {
-/*      LOG.debug("Went searching for " + key + ", found nothing!");*/
-      return null;
-    } else {
-/*      LOG.debug("Went searching for " + key + ", found " + headMap.lastKey().getRow());*/
-      return headMap.lastKey().getRow();
-    }
+    return headMap.isEmpty()? null: headMap.lastKey().getRow();
   }
 
   /**
@@ -1836,9 +1829,7 @@
     MapFile.Reader[] maparray = getReaders();
 
     Text bestSoFar = null;
-
-    HStoreKey rowKey = new HStoreKey(row, timestamp);
-
+
     // process each store file
     for(int i = maparray.length - 1; i >= 0; i--) {
       Text row_from_mapfile =
@@ -2025,7 +2016,7 @@
     try {
       Long mapIndex = Long.valueOf(0L);
       // Iterate through all the MapFiles
-      for(Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
+      for (Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
        HStoreFile curHSF = e.getValue();
        long size = curHSF.length();
        aggregateSize += size;
@@ -2038,29 +2029,28 @@
          splitable = !curHSF.isReference();
        }
      }
-      MapFile.Reader r = this.readers.get(mapIndex);
-
-      // seek back to the beginning of mapfile
-      r.reset();
-
-      // get the first and last keys
-      HStoreKey firstKey = new HStoreKey();
-      HStoreKey lastKey = new HStoreKey();
-      Writable value = new ImmutableBytesWritable();
-      r.next((WritableComparable)firstKey, value);
-      r.finalKey((WritableComparable)lastKey);
-
-      // get the midkey
-      HStoreKey midkey = (HStoreKey)r.midKey();
-
-      if (midkey != null) {
-        midKey.set(((HStoreKey)midkey).getRow());
-        // if the midkey is the same as the first and last keys, then we cannot
-        // (ever) split this region.
-        if (midkey.getRow().equals(firstKey.getRow()) &&
-            midkey.getRow().equals(lastKey.getRow())) {
-          return new HStoreSize(aggregateSize, maxSize, false);
-        }
+      if (splitable) {
+        MapFile.Reader r = this.readers.get(mapIndex);
+        // seek back to the beginning of mapfile
+        r.reset();
+        // get the first and last keys
+        HStoreKey firstKey = new HStoreKey();
+        HStoreKey lastKey = new HStoreKey();
+        Writable value = new ImmutableBytesWritable();
+        r.next(firstKey, value);
+        r.finalKey(lastKey);
+        // get the midkey
+        HStoreKey mk = (HStoreKey)r.midKey();
+        if (mk != null) {
+          // if the midkey is the same as the first and last keys, then we cannot
+          // (ever) split this region.
+          if (mk.getRow().equals(firstKey.getRow()) &&
+              mk.getRow().equals(lastKey.getRow())) {
+            return new HStoreSize(aggregateSize, maxSize, false);
+          }
+          // Otherwise, set midKey
+          midKey.set(mk.getRow());
+        }
       }
     } catch(IOException e) {
       LOG.warn("Failed getting store size for " + this.storeName, e);
diff --git a/src/java/org/apache/hadoop/hbase/HStoreFile.java b/src/java/org/apache/hadoop/hbase/HStoreFile.java
index f2c040d8b4e..e89851e131f 100644
--- a/src/java/org/apache/hadoop/hbase/HStoreFile.java
+++ b/src/java/org/apache/hadoop/hbase/HStoreFile.java
@@ -861,12 +861,10 @@ public class HStoreFile implements HConstants {
     public synchronized void finalKey(WritableComparable key)
     throws IOException {
       if (top) {
-        checkKey(key);
         super.finalKey(key);
       } else {
         reset();
         Writable value = new ImmutableBytesWritable();
-        key = super.getClosest(midkey, value, true);
       }
     }
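
Illustrative note, not part of the patch: the HRegion.split() hunk above refuses to split when the chosen midkey equals the region's start or end key. Below is a minimal, self-contained sketch of that guard written against Hadoop's org.apache.hadoop.io.Text; the class and method names (SplitPointGuard, isUsableSplitPoint) are hypothetical and not HBase API.

import org.apache.hadoop.io.Text;

/**
 * Hypothetical illustration (not HBase code) of the split-point guard added
 * to HRegion.split(): a region [startKey, endKey) is only split at midKey
 * when midKey differs from both boundaries; otherwise one daughter region
 * would cover an empty key range.
 */
public class SplitPointGuard {

  /** @return true if midKey is a usable split point for [startKey, endKey) */
  static boolean isUsableSplitPoint(Text startKey, Text midKey, Text endKey) {
    if (startKey.equals(midKey)) {
      return false; // daughter A = [startKey, midKey) would be empty
    }
    if (midKey.equals(endKey)) {
      return false; // daughter B = [midKey, endKey) would be empty
    }
    return true;
  }

  public static void main(String[] args) {
    Text start = new Text("row0000");
    Text end = new Text("row9999");
    System.out.println(isUsableSplitPoint(start, new Text("row5000"), end)); // true
    System.out.println(isUsableSplitPoint(start, new Text("row0000"), end)); // false
  }
}

In the patch itself this check runs in HRegion.split() before the daughter HRegionInfo entries are created, and the HStore.size() change only computes a midkey at all for stores that are still splitable.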