HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
that reach the client even after retries. Applied to TRUNK and branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@629192 13f79535-47bb-0310-9956-ffa450edef68
commit 377fb93ca7
parent e714fed123
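The heart of the change is in HRegion (the @@ -577,14 +577,25 @@ hunk below): before a split, the region's start and end keys are copied and the split is abandoned when the chosen midkey equals either boundary. As a minimal standalone sketch of that guard, using plain Strings instead of the org.apache.hadoop.io.Text keys the patch operates on (illustrative only, not part of the patch):

// Illustrative sketch only: mirrors the hbase-428 guard added in HRegion below.
// A midkey equal to the region's start or end key would make one daughter
// region start and end on the same key, so such a split is refused.
public class SplitGuardSketch {
  static boolean okToSplit(String startKey, String endKey, String midKey) {
    if (startKey.equals(midKey)) {
      return false; // daughter [startKey, midKey) would be empty
    }
    if (midKey.equals(endKey)) {
      return false; // daughter [midKey, endKey) would be empty
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(okToSplit("aaa", "zzz", "mmm")); // true: safe to split
    System.out.println(okToSplit("aaa", "zzz", "aaa")); // false: degenerate split
  }
}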
@@ -22,6 +22,9 @@ Hbase Change Log
   HBASE-446 Fully qualified hbase.rootdir doesn't work
   HBASE-438 XMLOutputter state should be initialized. (Edward Yoon via Stack)
   HBASE-8   Delete table does not remove the table directory in the FS
+  HBASE-428 Under continuous upload of rows, WrongRegionExceptions are thrown
+            that reach the client even after retries
+
 
 IMPROVEMENTS
   HBASE-415 Rewrite leases to use DelayedBlockingQueue instead of polling
@@ -536,7 +536,7 @@ public class HRegion implements HConstants {
   HStore.HStoreSize largestHStore(Text midkey) {
     HStore.HStoreSize biggest = null;
     boolean splitable = true;
-    for(HStore h: stores.values()) {
+    for (HStore h: stores.values()) {
       HStore.HStoreSize size = h.size(midkey);
       // If we came across a reference down in the store, then propagate
       // fact that region is not splitable.
@@ -577,14 +577,25 @@ public class HRegion implements HConstants {
     if(!this.fs.exists(splits)) {
       this.fs.mkdirs(splits);
     }
+    // Make copies just in case and add start/end key checking: hbase-428.
+    Text startKey = new Text(this.regionInfo.getStartKey());
+    Text endKey = new Text(this.regionInfo.getEndKey());
+    if (startKey.equals(midKey)) {
+      LOG.debug("Startkey and midkey are same, not splitting");
+      return null;
+    }
+    if (midKey.equals(endKey)) {
+      LOG.debug("Endkey and midkey are same, not splitting");
+      return null;
+    }
     HRegionInfo regionAInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      this.regionInfo.getStartKey(), midKey);
+      startKey, midKey);
     Path dirA = new Path(splits, regionAInfo.getEncodedName());
     if(fs.exists(dirA)) {
       throw new IOException("Cannot split; target file collision at " + dirA);
     }
     HRegionInfo regionBInfo = new HRegionInfo(this.regionInfo.getTableDesc(),
-      midKey, this.regionInfo.getEndKey());
+      midKey, endKey);
     Path dirB = new Path(splits, regionBInfo.getEncodedName());
     if(this.fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirB);
@@ -334,11 +334,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         update.put(COL_REGIONINFO, Writables.getBytes(
           newRegions[i].getRegionInfo()));
         t.commit(update);
-
-        /* long lockid = t.startUpdate(newRegions[i].getRegionName());
-        t.put(lockid, COL_REGIONINFO, Writables.getBytes(
-          newRegions[i].getRegionInfo()));
-        t.commit(lockid);*/
       }
 
       // Now tell the master about the new regions
@@ -348,9 +343,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(),
         newRegions[1].getRegionInfo());
       LOG.info("region split, META updated, and report to master all" +
-        " successful. Old region=" + oldRegionInfo.getRegionName() +
-        ", new regions: " + newRegions[0].getRegionName() + ", " +
-        newRegions[1].getRegionName() + ". Split took " +
+        " successful. Old region=" + oldRegionInfo.toString() +
+        ", new regions: " + newRegions[0].toString() + ", " +
+        newRegions[1].toString() + ". Split took " +
         StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
 
       // Do not serve the new regions. Let the Master assign them.
@@ -243,12 +243,11 @@ public class HStore implements HConstants {
       if ( (key_memcache != null && key_memcache.equals(row))
         || (key_snapshot != null && key_snapshot.equals(row)) ) {
         return row;
-      } else {
-        // no precise matches, so return the one that is closer to the search
-        // key (greatest)
-        return key_memcache.compareTo(key_snapshot) > 0 ?
-          key_memcache : key_snapshot;
       }
+      // no precise matches, so return the one that is closer to the search
+      // key (greatest)
+      return key_memcache.compareTo(key_snapshot) > 0 ?
+        key_memcache : key_snapshot;
       }
     } finally {
       this.lock.readLock().unlock();
@@ -293,13 +292,7 @@ public class HStore implements HConstants {
     // the tail didn't contain the key we're searching for, so we should
     // use the last key in the headmap as the closest before
     SortedMap<HStoreKey, byte []> headMap = map.headMap(search_key);
-    if (headMap.isEmpty()) {
-      /* LOG.debug("Went searching for " + key + ", found nothing!");*/
-      return null;
-    } else {
-      /* LOG.debug("Went searching for " + key + ", found " + headMap.lastKey().getRow());*/
-      return headMap.lastKey().getRow();
-    }
+    return headMap.isEmpty()? null: headMap.lastKey().getRow();
   }
 
   /**
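The hunk above collapses the headmap lookup into a single expression: the closest preceding entry is the last key of the headmap, or null when the headmap is empty. A standalone sketch of that idiom over a plain TreeMap of String row keys (illustrative only, not the HBase types used above):

import java.util.SortedMap;
import java.util.TreeMap;

// Illustrative sketch of the headmap idiom: headMap(searchKey) holds every key
// strictly before the search key, so its last key is the closest preceding row,
// or null when nothing precedes the search key.
public class ClosestRowBeforeSketch {
  static String closestRowBefore(TreeMap<String, byte[]> rows, String searchKey) {
    SortedMap<String, byte[]> headMap = rows.headMap(searchKey);
    return headMap.isEmpty() ? null : headMap.lastKey();
  }

  public static void main(String[] args) {
    TreeMap<String, byte[]> rows = new TreeMap<>();
    rows.put("row-a", new byte[0]);
    rows.put("row-c", new byte[0]);
    System.out.println(closestRowBefore(rows, "row-b")); // row-a
    System.out.println(closestRowBefore(rows, "row-a")); // null
  }
}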
@@ -1837,8 +1830,6 @@ public class HStore implements HConstants {
 
     Text bestSoFar = null;
 
-    HStoreKey rowKey = new HStoreKey(row, timestamp);
-
     // process each store file
     for(int i = maparray.length - 1; i >= 0; i--) {
       Text row_from_mapfile =
@@ -2025,7 +2016,7 @@ public class HStore implements HConstants {
     try {
       Long mapIndex = Long.valueOf(0L);
       // Iterate through all the MapFiles
-      for(Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
+      for (Map.Entry<Long, HStoreFile> e: storefiles.entrySet()) {
         HStoreFile curHSF = e.getValue();
         long size = curHSF.length();
         aggregateSize += size;
@@ -2038,28 +2029,27 @@ public class HStore implements HConstants {
           splitable = !curHSF.isReference();
         }
       }
-      MapFile.Reader r = this.readers.get(mapIndex);
-
-      // seek back to the beginning of mapfile
-      r.reset();
-
-      // get the first and last keys
-      HStoreKey firstKey = new HStoreKey();
-      HStoreKey lastKey = new HStoreKey();
-      Writable value = new ImmutableBytesWritable();
-      r.next((WritableComparable)firstKey, value);
-      r.finalKey((WritableComparable)lastKey);
-
-      // get the midkey
-      HStoreKey midkey = (HStoreKey)r.midKey();
-
-      if (midkey != null) {
-        midKey.set(((HStoreKey)midkey).getRow());
-        // if the midkey is the same as the first and last keys, then we cannot
-        // (ever) split this region.
-        if (midkey.getRow().equals(firstKey.getRow()) &&
-            midkey.getRow().equals(lastKey.getRow())) {
-          return new HStoreSize(aggregateSize, maxSize, false);
+      if (splitable) {
+        MapFile.Reader r = this.readers.get(mapIndex);
+        // seek back to the beginning of mapfile
+        r.reset();
+        // get the first and last keys
+        HStoreKey firstKey = new HStoreKey();
+        HStoreKey lastKey = new HStoreKey();
+        Writable value = new ImmutableBytesWritable();
+        r.next(firstKey, value);
+        r.finalKey(lastKey);
+        // get the midkey
+        HStoreKey mk = (HStoreKey)r.midKey();
+        if (mk != null) {
+          // if the midkey is the same as the first and last keys, then we cannot
+          // (ever) split this region.
+          if (mk.getRow().equals(firstKey.getRow()) &&
+            mk.getRow().equals(lastKey.getRow())) {
+            return new HStoreSize(aggregateSize, maxSize, false);
+          }
+          // Otherwise, set midKey
+          midKey.set(mk.getRow());
         }
       }
     } catch(IOException e) {
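The hunk above computes a midkey only when the store is splitable, and reports the store as not splitable when the MapFile's midkey row equals both its first and last row, since such a store holds a single row and can never yield a split point. A rough sketch of that decision over a sorted list of row keys (hypothetical helper, not HBase API):

import java.util.Arrays;
import java.util.List;

// Illustrative sketch of the "can this store supply a split point?" decision:
// take the middle row of the sorted keys; if it equals both the first and the
// last row, everything belongs to one row and the store cannot be split.
public class MidKeySketch {
  static String splitRow(List<String> sortedRows) {
    if (sortedRows.isEmpty()) {
      return null;
    }
    String first = sortedRows.get(0);
    String last = sortedRows.get(sortedRows.size() - 1);
    String mid = sortedRows.get(sortedRows.size() / 2);
    if (mid.equals(first) && mid.equals(last)) {
      return null; // only one distinct row: not splitable
    }
    return mid;
  }

  public static void main(String[] args) {
    System.out.println(splitRow(Arrays.asList("a", "b", "c", "d"))); // c
    System.out.println(splitRow(Arrays.asList("a", "a", "a")));      // null
  }
}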
@@ -861,12 +861,10 @@ public class HStoreFile implements HConstants {
     public synchronized void finalKey(WritableComparable key)
     throws IOException {
       if (top) {
-        checkKey(key);
         super.finalKey(key);
       } else {
         reset();
         Writable value = new ImmutableBytesWritable();
-
         key = super.getClosest(midkey, value, true);
       }
     }