HBASE-2457 RS gets stuck compacting region ad infinitum

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@945410 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2010-05-17 22:23:32 +00:00
parent afd149c7aa
commit 0491cb140a
2 changed files with 14 additions and 4 deletions

View File

@@ -326,6 +326,9 @@ Release 0.21.0 - Unreleased
               (Nicolas Spiegelberg via Stack)
    HBASE-2415 Disable META splitting in 0.20 (Todd Lipcon via Stack)
    HBASE-2421 Put hangs for 10 retries on failed region servers
+   HBASE-2442 Log lease recovery catches IOException too widely
+              (Todd Lipcon via Stack)
+   HBASE-2457 RS gets stuck compacting region ad infinitum
 
   IMPROVEMENTS
    HBASE-1760 Cleanup TODOs in HTable

View File

@@ -722,7 +722,7 @@ public class Store implements HConstants, HeapSize {
       Reader r = file.getReader();
       if (r == null) {
         LOG.warn("StoreFile " + file + " has a null Reader");
-        continue;
+        return null;
       }
       long len = file.getReader().length();
       fileSizes[i] = len;
@@ -734,9 +734,16 @@ public class Store implements HConstants, HeapSize {
       // The rule is: if the largest(oldest) one is more than twice the
       // size of the second, skip the largest, and continue to next...,
       // until we meet the compactionThreshold limit.
-      for (point = 0; point < countOfFiles - 1; point++) {
-        if ((fileSizes[point] < fileSizes[point + 1] * 2) &&
-            (countOfFiles - point) <= maxFilesToCompact) {
+      // A problem with the above heuristic is that we could go through all of
+      // filesToCompact and the above condition could hold for all files and
+      // we'd end up with nothing to compact.  To protect against this, we'll
+      // compact the tail -- up to the last 4 files -- of filesToCompact
+      // regardless.
+      int tail = Math.min(countOfFiles, 4);
+      for (point = 0; point < (countOfFiles - tail); point++) {
+        if (((fileSizes[point] < fileSizes[point + 1] * 2) &&
+            (countOfFiles - point) <= maxFilesToCompact)) {
           break;
         }
         skipped += fileSizes[point];