HBASE-1238 Under upload, region servers are unable to compact when loaded
with hundreds of regions

This is a hack; compactions may run prematurely.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@751023 13f79535-47bb-0310-9956-ffa450edef68
commit 0aafa3ef8f
parent 094a2030f0
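In MemcacheFlusher.flushSomeRegions(), the region server now remembers every region it flushes while working back under the global memcache low water mark and, once the sweep is done, asks the compactSplitThread to compact each of them even if they would not otherwise have qualified yet; a consolidated sketch of the patched method follows the diff hunks below.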
CHANGES.txt
@@ -35,6 +35,8 @@ Release 0.20.0 - Unreleased
               buffer each iteration
    HBASE-1185  wrong request/sec in the gui
               reporting wrong (Brian Beggs via Stack)
    HBASE-1245  hfile meta block handling bugs (Ryan Rawson via Stack)
+   HBASE-1238  Under upload, region servers are unable
+              to compact when loaded with hundreds of regions
 
   IMPROVEMENTS
    HBASE-1089  Add count of regions on filesystem to master UI; add percentage
MemcacheFlusher.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
import java.util.ConcurrentModificationException;
 import java.util.HashSet;
 import java.util.SortedMap;
@@ -268,6 +269,7 @@ class MemcacheFlusher extends Thread implements FlushRequester {
   private synchronized void flushSomeRegions() {
     // keep flushing until we hit the low water mark
     long globalMemcacheSize = -1;
+    ArrayList<HRegion> regionsToCompact = new ArrayList();
     for (SortedMap<Long, HRegion> m =
         this.server.getCopyOfOnlineRegionsSortedBySize();
       (globalMemcacheSize = server.getGlobalMemcacheSize()) >=
@@ -292,6 +294,10 @@ class MemcacheFlusher extends Thread implements FlushRequester {
         LOG.warn("Flush failed");
         break;
       }
+      regionsToCompact.add(biggestMemcacheRegion);
     }
+    for (HRegion region : regionsToCompact) {
+      server.compactSplitThread.compactionRequested(region, getName());
+    }
   }
 }
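Since the hunks above show only fragments, here is a minimal sketch of how flushSomeRegions() reads with this patch applied. The loop skeleton, the globalMemcacheLimitLowMark field, and the getBiggestMemcacheRegion()/flushRegion() helpers are assumptions used for illustration; only the regionsToCompact bookkeeping and the compactionRequested() calls come from the diff itself.

  // Sketch only -- not the committed HBase source.
  private synchronized void flushSomeRegions() {
    // Regions flushed in this sweep; compactions are requested for them
    // afterwards (the commit message calls this a hack, since some of
    // those compactions may run prematurely).
    ArrayList<HRegion> regionsToCompact = new ArrayList<HRegion>();

    // Keep flushing the biggest memcaches until global usage drops back
    // under the low water mark (field name assumed here).
    while (server.getGlobalMemcacheSize() >= globalMemcacheLimitLowMark) {
      HRegion biggestMemcacheRegion = getBiggestMemcacheRegion(); // assumed helper
      if (biggestMemcacheRegion == null || !flushRegion(biggestMemcacheRegion)) {
        LOG.warn("Flush failed");
        break;
      }
      regionsToCompact.add(biggestMemcacheRegion);
    }

    // Only after the flush sweep finishes are the compaction requests
    // handed to the region server's compaction/split thread.
    for (HRegion region : regionsToCompact) {
      server.compactSplitThread.compactionRequested(region, getName());
    }
  }

The committed hunk uses the raw new ArrayList(); the parameterized form above is only for readability.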