fix false assert trip; fix LTC to be 'sticky' in how it changes the live IWC settings for flush triggers

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1595300 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2014-05-16 18:45:25 +00:00
parent e2ba693a6e
commit 3b13b09f98
4 changed files with 66 additions and 21 deletions

View File

@ -62,6 +62,7 @@ final class DocumentsWriterFlushControl {
long peakFlushBytes = 0;// only with assert
long peakNetBytes = 0;// only with assert
long peakDelta = 0; // only with assert
boolean flushByRAMWasDisabled; // only with assert
final DocumentsWriterStallControl stallControl;
private final DocumentsWriterPerThreadPool perThreadPool;
private final FlushPolicy flushPolicy;
@ -101,7 +102,9 @@ final class DocumentsWriterFlushControl {
private boolean assertMemory() {
final double maxRamMB = config.getRAMBufferSizeMB();
if (maxRamMB != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
// We can only assert if we have always been flushing by RAM usage; otherwise the assert will false trip if e.g. the
// flush-by-doc-count * doc size was large enough to use far more RAM than the sudden change to IWC's maxRAMBufferSizeMB:
if (maxRamMB != IndexWriterConfig.DISABLE_AUTO_FLUSH && flushByRAMWasDisabled == false) {
// for this assert we must be tolerant to ram buffer changes!
maxConfiguredRamBuffer = Math.max(maxRamMB, maxConfiguredRamBuffer);
final long ram = flushBytes + activeBytes;
@ -109,7 +112,7 @@ final class DocumentsWriterFlushControl {
// take peakDelta into account - worst case is that all flushing, pending and blocked DWPT had maxMem and the last doc had the peakDelta
// 2 * ramBufferBytes -> before we stall we need to cross the 2xRAM Buffer border this is still a valid limit
// (numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta) -> those are the total number of DWPT that are not active but not yet fully fluhsed
// (numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta) -> those are the total number of DWPT that are not active but not yet fully flushed
// all of them could theoretically be taken out of the loop once they crossed the RAM buffer and the last document was the peak delta
// (numDocsSinceStalled * peakDelta) -> at any given time there could be n threads in flight that crossed the stall control before we reached the limit and each of them could hold a peak document
final long expected = (2 * (ramBufferBytes)) + ((numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta) + (numDocsSinceStalled * peakDelta);
@ -128,8 +131,11 @@ final class DocumentsWriterFlushControl {
+ " byte, flush mem: " + flushBytes + ", active mem: " + activeBytes
+ ", pending DWPT: " + numPending + ", flushing DWPT: "
+ numFlushingDWPT() + ", blocked DWPT: " + numBlockedFlushes()
+ ", peakDelta mem: " + peakDelta + " byte";
+ ", peakDelta mem: " + peakDelta + " bytes, ramBufferBytes=" + ramBufferBytes
+ ", maxConfiguredRamBuffer=" + maxConfiguredRamBuffer;
}
} else {
flushByRAMWasDisabled = true;
}
return true;
}
@ -220,14 +226,14 @@ final class DocumentsWriterFlushControl {
assert assertMemory();
} finally {
try {
updateStallState();
updateStallState();
} finally {
notifyAll();
}
}
}
private final boolean updateStallState() {
private boolean updateStallState() {
assert Thread.holdsLock(this);
final long limit = stallLimitBytes();

View File

@ -295,6 +295,7 @@ public class RandomIndexWriter implements Closeable {
private void doRandomForceMerge() throws IOException {
if (doRandomForceMerge) {
final int segCount = w.getSegmentCount();
// TODO: put random force merge deletes here
if (r.nextBoolean() || segCount == 0) {
// full forceMerge
if (LuceneTestCase.VERBOSE) {

View File

@ -47,7 +47,6 @@ import java.util.Random;
import java.util.Set;
import java.util.TimeZone;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@ -631,6 +630,15 @@ public abstract class LuceneTestCase extends Assert {
private static final Map<String,FieldType> fieldToType = new HashMap<String,FieldType>();
/** Controls which flush trigger the live-IWC randomization may vary: RAM buffer only, doc count only, or either. */
enum LiveIWCFlushMode {BY_RAM, BY_DOCS, EITHER};
/** Set by TestRuleSetupAndRestoreClassEnv */
static LiveIWCFlushMode liveIWCFlushMode;
/**
 * Sets the sticky flush-trigger mode for live IndexWriterConfig changes.
 * Called by TestRuleSetupAndRestoreClassEnv once per suite so that later
 * randomized IWC tweaks stay consistent (always-RAM, always-docs, or either).
 *
 * @param flushMode the mode to apply for the remainder of the suite
 */
static void setLiveIWCFlushMode(LiveIWCFlushMode flushMode) {
liveIWCFlushMode = flushMode;
}
// -----------------------------------------------------------------
// Suite and test case setup/ cleanup.
// -----------------------------------------------------------------
@ -996,8 +1004,21 @@ public abstract class LuceneTestCase extends Assert {
// this is complicated because the api requires you "invoke setters in a magical order!"
// LUCENE-5661: workaround for race conditions in the API
synchronized (c) {
boolean flushByRam = r.nextBoolean();
if (flushByRam) {
boolean flushByRAM;
switch (liveIWCFlushMode) {
case BY_RAM:
flushByRAM = true;
break;
case BY_DOCS:
flushByRAM = false;
break;
case EITHER:
flushByRAM = random().nextBoolean();
break;
default:
throw new AssertionError();
}
if (flushByRAM) {
c.setRAMBufferSizeMB(TestUtil.nextInt(r, 1, 10));
c.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
} else {

View File

@ -17,17 +17,6 @@ package org.apache.lucene.util;
* limitations under the License.
*/
import static org.apache.lucene.util.LuceneTestCase.INFOSTREAM;
import static org.apache.lucene.util.LuceneTestCase.TEST_CODEC;
import static org.apache.lucene.util.LuceneTestCase.TEST_DOCVALUESFORMAT;
import static org.apache.lucene.util.LuceneTestCase.TEST_POSTINGSFORMAT;
import static org.apache.lucene.util.LuceneTestCase.VERBOSE;
import static org.apache.lucene.util.LuceneTestCase.assumeFalse;
import static org.apache.lucene.util.LuceneTestCase.localeForName;
import static org.apache.lucene.util.LuceneTestCase.random;
import static org.apache.lucene.util.LuceneTestCase.randomLocale;
import static org.apache.lucene.util.LuceneTestCase.randomTimeZone;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Date;
@ -59,10 +48,19 @@ import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.junit.internal.AssumptionViolatedException;
import com.carrotsearch.randomizedtesting.RandomizedContext;
import static org.apache.lucene.util.LuceneTestCase.INFOSTREAM;
import static org.apache.lucene.util.LuceneTestCase.LiveIWCFlushMode;
import static org.apache.lucene.util.LuceneTestCase.TEST_CODEC;
import static org.apache.lucene.util.LuceneTestCase.TEST_DOCVALUESFORMAT;
import static org.apache.lucene.util.LuceneTestCase.TEST_POSTINGSFORMAT;
import static org.apache.lucene.util.LuceneTestCase.VERBOSE;
import static org.apache.lucene.util.LuceneTestCase.assumeFalse;
import static org.apache.lucene.util.LuceneTestCase.localeForName;
import static org.apache.lucene.util.LuceneTestCase.random;
import static org.apache.lucene.util.LuceneTestCase.randomLocale;
import static org.apache.lucene.util.LuceneTestCase.randomTimeZone;
/**
* Setup and restore suite-level environment (fine grained junk that
@ -274,6 +272,25 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule {
Arrays.toString(avoidCodecs.toArray()));
throw e;
}
// We have "stickiness" so that sometimes all we do is vary the RAM buffer size, other times just the doc count to flush by, else both.
// This way the assertMemory in DocumentsWriterFlushControl sometimes runs (when we always flush by RAM).
LiveIWCFlushMode flushMode;
switch (random().nextInt(3)) {
case 0:
flushMode = LiveIWCFlushMode.BY_RAM;
break;
case 1:
flushMode = LiveIWCFlushMode.BY_DOCS;
break;
case 2:
flushMode = LiveIWCFlushMode.EITHER;
break;
default:
throw new AssertionError();
}
LuceneTestCase.setLiveIWCFlushMode(flushMode);
}
/**