mirror of https://github.com/apache/lucene.git

LUCENE-2739: refactor TestIndexWriter, pull out _OnDiskFull and _WithThreads

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1031076 13f79535-47bb-0310-9956-ffa450edef68

parent 154105635d
commit ac4674aa20
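Both new test classes in this commit drive IndexWriter against a simulated full disk. As orientation for the diff below, here is a minimal sketch of that idiom, assuming the same LuceneTestCase-era helpers the tests themselves use (newIndexWriterConfig, newField, TEST_VERSION_CURRENT); the class and method names in the sketch are hypothetical and are not part of the commit.

package org.apache.lucene.index;

// Hypothetical sketch (not part of this commit): the disk-full simulation idiom
// shared by TestIndexWriterOnDiskFull and TestIndexWriterWithThreads below.
// A RAMDirectory is wrapped in MockDirectoryWrapper, its size is capped, and
// random IOExceptions are optionally injected; a cap of 0 bytes means unlimited.
import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;

public class DiskFullIdiomSketch extends LuceneTestCase {
  public void testDiskFullIdiom() throws IOException {
    MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
    dir.setMaxSizeInBytes(200);              // simulate a nearly full disk
    dir.setRandomIOExceptionRate(0.05, 200); // also fail ~5% of writes at random
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    Document doc = new Document();
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    try {
      writer.addDocument(doc);               // may throw a "fake disk full" IOException
      writer.commit();
    } catch (IOException ioe) {
      // expected once the cap is hit; the index must remain readable and uncorrupted
    }
    dir.setRandomIOExceptionRate(0.0, 0);    // stop injecting failures
    dir.setMaxSizeInBytes(0);                // lift the cap so close() can finish cleanly
    writer.close();
    dir.close();
  }
}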
@@ -159,377 +159,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.addDocument(doc);
}

/*
  Test: make sure when we run out of disk space or hit
  random IOExceptions in any of the addIndexes(*) calls
  that 1) index is not corrupt (searcher can open/search
  it) and 2) transactional semantics are followed:
  either all or none of the incoming documents were in
  fact added.
*/
public void testAddIndexOnDiskFull() throws IOException
{
|
||||
int START_COUNT = 57;
|
||||
int NUM_DIR = 50;
|
||||
int END_COUNT = START_COUNT + NUM_DIR*25;
|
||||
|
||||
// Build up a bunch of dirs that have indexes which we
|
||||
// will then merge together by calling addIndexes(*):
|
||||
Directory[] dirs = new Directory[NUM_DIR];
|
||||
long inputDiskUsage = 0;
|
||||
for(int i=0;i<NUM_DIR;i++) {
|
||||
dirs[i] = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
for(int j=0;j<25;j++) {
|
||||
addDocWithIndex(writer, 25*i+j);
|
||||
}
|
||||
writer.close();
|
||||
String[] files = dirs[i].listAll();
|
||||
for(int j=0;j<files.length;j++) {
|
||||
inputDiskUsage += dirs[i].fileLength(files[j]);
|
||||
}
|
||||
}
|
||||
|
||||
// Now, build a starting index that has START_COUNT docs. We
|
||||
// will then try to addIndexesNoOptimize into a copy of this:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
for(int j=0;j<START_COUNT;j++) {
|
||||
addDocWithIndex(writer, j);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// Make sure starting index seems to be working properly:
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(startDir, true);
|
||||
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
|
||||
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 57, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// Iterate with larger and larger amounts of free
|
||||
// disk space. With little free disk space,
|
||||
// addIndexes will certainly run out of space &
|
||||
// fail. Verify that when this happens, index is
|
||||
// not corrupt and index in fact has added no
|
||||
// documents. Then, we increase disk space by 2000
|
||||
// bytes each iteration. At some point there is
|
||||
// enough free disk space and addIndexes should
|
||||
// succeed and index should show all documents were
|
||||
// added.
|
||||
|
||||
// String[] files = startDir.listAll();
|
||||
long diskUsage = startDir.sizeInBytes();
|
||||
|
||||
long startDiskUsage = 0;
|
||||
String[] files = startDir.listAll();
|
||||
for(int i=0;i<files.length;i++) {
|
||||
startDiskUsage += startDir.fileLength(files[i]);
|
||||
}
|
||||
|
||||
for(int iter=0;iter<3;iter++) {
|
||||
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: iter=" + iter);
|
||||
|
||||
// Start with 100 bytes more than we are currently using:
|
||||
long diskFree = diskUsage+100;
|
||||
|
||||
int method = iter;
|
||||
|
||||
boolean success = false;
|
||||
boolean done = false;
|
||||
|
||||
String methodName;
|
||||
if (0 == method) {
|
||||
methodName = "addIndexes(Directory[]) + optimize()";
|
||||
} else if (1 == method) {
|
||||
methodName = "addIndexes(IndexReader[])";
|
||||
} else {
|
||||
methodName = "addIndexes(Directory[])";
|
||||
}
|
||||
|
||||
while(!done) {
|
||||
|
||||
// Make a new dir that will enforce disk usage:
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
IOException err = null;
|
||||
|
||||
MergeScheduler ms = writer.getConfig().getMergeScheduler();
|
||||
for(int x=0;x<2;x++) {
|
||||
if (ms instanceof ConcurrentMergeScheduler)
|
||||
// This test intentionally produces exceptions
|
||||
// in the threads that CMS launches; we don't
|
||||
// want to pollute test output with these.
|
||||
if (0 == x)
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
else
|
||||
((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
|
||||
|
||||
// Two loops: first time, limit disk space &
|
||||
// throw random IOExceptions; second time, no
|
||||
// disk space limit:
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
|
||||
String testName = null;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE)
|
||||
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE)
|
||||
testName = "disk full test " + methodName + " with unlimited disk space";
|
||||
}
|
||||
|
||||
if (VERBOSE)
|
||||
System.out.println("\ncycle: " + testName);
|
||||
|
||||
dir.setTrackDiskUsage(true);
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate, diskFree);
|
||||
|
||||
try {
|
||||
|
||||
if (0 == method) {
|
||||
writer.addIndexes(dirs);
|
||||
writer.optimize();
|
||||
} else if (1 == method) {
|
||||
IndexReader readers[] = new IndexReader[dirs.length];
|
||||
for(int i=0;i<dirs.length;i++) {
|
||||
readers[i] = IndexReader.open(dirs[i], true);
|
||||
}
|
||||
try {
|
||||
writer.addIndexes(readers);
|
||||
} finally {
|
||||
for(int i=0;i<dirs.length;i++) {
|
||||
readers[i].close();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
writer.addIndexes(dirs);
|
||||
}
|
||||
|
||||
success = true;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" success!");
|
||||
}
|
||||
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
success = false;
|
||||
err = e;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
|
||||
if (1 == x) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(methodName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all threads from
|
||||
// ConcurrentMergeScheduler are done
|
||||
_TestUtil.syncConcurrentMerges(writer);
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" now test readers");
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs added, and if we
|
||||
// failed, we see either all docs or no docs added
|
||||
// (transactional semantics):
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(testName + ": exception when creating IndexReader: " + e);
|
||||
}
|
||||
int result = reader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != START_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace(System.out);
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher = new IndexSearcher(reader);
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != result) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != result) {
|
||||
err.printStackTrace(System.out);
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
reader.close();
|
||||
if (VERBOSE) {
|
||||
System.out.println(" count is " + result);
|
||||
}
|
||||
|
||||
if (done || result == END_COUNT) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
|
||||
}
|
||||
|
||||
if (done) {
|
||||
// Javadocs state that temp free Directory space
|
||||
// required is at most 2X total input size of
|
||||
// indices so let's make sure:
|
||||
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
|
||||
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
|
||||
"starting disk usage = " + startDiskUsage + " bytes; " +
|
||||
"input index disk usage = " + inputDiskUsage + " bytes",
|
||||
(dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
|
||||
}
|
||||
|
||||
// Make sure we don't hit disk full during close below:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
dir.setRandomIOExceptionRate(0.0, 0);
|
||||
|
||||
writer.close();
|
||||
|
||||
// Wait for all BG threads to finish else
|
||||
// dir.close() will throw IOException because
|
||||
// there are still open files
|
||||
_TestUtil.syncConcurrentMerges(ms);
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with 5000 more bytes of free space:
|
||||
diskFree += 5000;
|
||||
}
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
for (Directory dir : dirs)
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure IndexWriter cleans up on hitting a disk
|
||||
* full exception in addDocument.
|
||||
* TODO: how to do this on windows with FSDirectory?
|
||||
*/
|
||||
public void testAddDocumentOnDiskFull() throws IOException {
|
||||
|
||||
for(int pass=0;pass<2;pass++) {
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: pass=" + pass);
|
||||
boolean doAbort = pass == 1;
|
||||
long diskFree = 200;
|
||||
while(true) {
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: cycle: diskFree=" + diskFree);
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
|
||||
dir.setMaxSizeInBytes(diskFree);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
MergeScheduler ms = writer.getConfig().getMergeScheduler();
|
||||
if (ms instanceof ConcurrentMergeScheduler)
|
||||
// This test intentionally produces exceptions
|
||||
// in the threads that CMS launches; we don't
|
||||
// want to pollute test output with these.
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
|
||||
boolean hitError = false;
|
||||
try {
|
||||
for(int i=0;i<200;i++) {
|
||||
addDoc(writer);
|
||||
}
|
||||
writer.commit();
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: exception on addDoc");
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
hitError = true;
|
||||
}
|
||||
|
||||
if (hitError) {
|
||||
if (doAbort) {
|
||||
writer.rollback();
|
||||
} else {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: exception on close");
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close();
|
||||
}
|
||||
}
|
||||
|
||||
//_TestUtil.syncConcurrentMerges(ms);
|
||||
|
||||
if (_TestUtil.anyFilesExceptWriteLock(dir)) {
|
||||
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
|
||||
|
||||
// Make sure reader can open the index:
|
||||
IndexReader.open(dir, true).close();
|
||||
}
|
||||
|
||||
dir.close();
|
||||
// Now try again w/ more space:
|
||||
|
||||
diskFree += 500;
|
||||
} else {
|
||||
//_TestUtil.syncConcurrentMerges(writer);
|
||||
writer.close();
|
||||
dir.close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException {
|
||||
String[] startFiles = dir.listAll();
|
||||

@@ -2190,421 +1820,6 @@ public class TestIndexWriter extends LuceneTestCase {
directory.close();
|
||||
}
|
||||
|
||||
// Used by test cases below
|
||||
private class IndexerThread extends Thread {
|
||||
|
||||
boolean diskFull;
|
||||
Throwable error;
|
||||
AlreadyClosedException ace;
|
||||
IndexWriter writer;
|
||||
boolean noErrors;
|
||||
volatile int addCount;
|
||||
|
||||
public IndexerThread(IndexWriter writer, boolean noErrors) {
|
||||
this.writer = writer;
|
||||
this.noErrors = noErrors;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
|
||||
|
||||
int idUpto = 0;
|
||||
int fullCount = 0;
|
||||
final long stopTime = System.currentTimeMillis() + 200;
|
||||
|
||||
do {
|
||||
try {
|
||||
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
|
||||
addCount++;
|
||||
} catch (IOException ioe) {
|
||||
//System.out.println(Thread.currentThread().getName() + ": hit exc");
|
||||
//ioe.printStackTrace(System.out);
|
||||
if (ioe.getMessage().startsWith("fake disk full at") ||
|
||||
ioe.getMessage().equals("now failing on purpose")) {
|
||||
diskFull = true;
|
||||
try {
|
||||
Thread.sleep(1);
|
||||
} catch (InterruptedException ie) {
|
||||
throw new ThreadInterruptedException(ie);
|
||||
}
|
||||
if (fullCount++ >= 5)
|
||||
break;
|
||||
} else {
|
||||
if (noErrors) {
|
||||
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
|
||||
ioe.printStackTrace(System.out);
|
||||
error = ioe;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
//t.printStackTrace(System.out);
|
||||
if (noErrors) {
|
||||
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
|
||||
t.printStackTrace(System.out);
|
||||
error = t;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} while(System.currentTimeMillis() < stopTime);
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid use of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
|
||||
int NUM_THREADS = 3;
|
||||
|
||||
for(int iter=0;iter<7;iter++) {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler());
|
||||
// We expect AlreadyClosedException
|
||||
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
|
||||
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i] = new IndexerThread(writer, false);
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i].start();
|
||||
|
||||
boolean done = false;
|
||||
while(!done) {
|
||||
Thread.sleep(100);
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
// only stop when at least one thread has added a doc
|
||||
if (threads[i].addCount > 0) {
|
||||
done = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
writer.close(false);
|
||||
|
||||
// Make sure threads that are adding docs are not hung:
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
// Without fix for LUCENE-1130: one of the
|
||||
// threads will hang
|
||||
threads[i].join();
|
||||
if (threads[i].isAlive())
|
||||
fail("thread seems to be hung");
|
||||
}
|
||||
|
||||
// Quick test to make sure index is not corrupt:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
|
||||
MultiFields.getDeletedDocs(reader),
|
||||
"field",
|
||||
new BytesRef("aaa"));
|
||||
int count = 0;
|
||||
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
|
||||
count++;
|
||||
}
|
||||
assertTrue(count > 0);
|
||||
reader.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
|
||||
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
|
||||
try {
|
||||
writer.addDocument(doc);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
// Without fix for LUCENE-1130: this call will hang:
|
||||
try {
|
||||
writer.addDocument(doc);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
try {
|
||||
writer.close(false);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
|
||||
// Make sure once disk space is avail again, we can
|
||||
// cleanly close:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close(false);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure immediate disk full on creating
|
||||
// an IndexWriter (hit during DW.ThreadState.init()), with
|
||||
// multiple threads, is OK:
|
||||
public void testImmediateDiskFullWithThreads() throws Exception {
|
||||
|
||||
int NUM_THREADS = 3;
|
||||
|
||||
for(int iter=0;iter<10;iter++) {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
|
||||
// We expect disk full exceptions in the merge threads
|
||||
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
dir.setMaxSizeInBytes(4*1024+20*iter);
|
||||
|
||||
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i] = new IndexerThread(writer, true);
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i].start();
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
// Without fix for LUCENE-1130: one of the
|
||||
// threads will hang
|
||||
threads[i].join();
|
||||
assertTrue("hit unexpected Throwable", threads[i].error == null);
|
||||
}
|
||||
|
||||
// Make sure once disk space is avail again, we can
|
||||
// cleanly close:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close(false);
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
// Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
|
||||
private static class FailOnlyOnAbortOrFlush extends MockDirectoryWrapper.Failure {
|
||||
private boolean onlyOnce;
|
||||
public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
|
||||
this.onlyOnce = onlyOnce;
|
||||
}
|
||||
@Override
|
||||
public void eval(MockDirectoryWrapper dir) throws IOException {
|
||||
if (doFail) {
|
||||
StackTraceElement[] trace = new Exception().getStackTrace();
|
||||
for (int i = 0; i < trace.length; i++) {
|
||||
if ("abort".equals(trace[i].getMethodName()) ||
|
||||
"flushDocument".equals(trace[i].getMethodName())) {
|
||||
if (onlyOnce)
|
||||
doFail = false;
|
||||
//System.out.println(Thread.currentThread().getName() + ": now fail");
|
||||
//new Throwable().printStackTrace(System.out);
|
||||
throw new IOException("now failing on purpose");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Runs test, with one thread, using the specific failure
|
||||
// to trigger an IOException
|
||||
public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
|
||||
|
||||
for(int i=0;i<6;i++)
|
||||
writer.addDocument(doc);
|
||||
|
||||
dir.failOn(failure);
|
||||
failure.setDoFail();
|
||||
try {
|
||||
writer.addDocument(doc);
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
fail("did not hit exception");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
failure.clearDoFail();
|
||||
writer.addDocument(doc);
|
||||
writer.close(false);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// Runs test, with multiple threads, using the specific
|
||||
// failure to trigger an IOException
|
||||
public void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure) throws Exception {
|
||||
|
||||
int NUM_THREADS = 3;
|
||||
|
||||
for(int iter=0;iter<2;iter++) {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
|
||||
// We expect disk full exceptions in the merge threads
|
||||
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
|
||||
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i] = new IndexerThread(writer, true);
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i].start();
|
||||
|
||||
Thread.sleep(10);
|
||||
|
||||
dir.failOn(failure);
|
||||
failure.setDoFail();
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
threads[i].join();
|
||||
assertTrue("hit unexpected Throwable", threads[i].error == null);
|
||||
}
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
writer.close(false);
|
||||
success = true;
|
||||
} catch (IOException ioe) {
|
||||
failure.clearDoFail();
|
||||
writer.close(false);
|
||||
}
|
||||
|
||||
if (success) {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
final Bits delDocs = MultiFields.getDeletedDocs(reader);
|
||||
for(int j=0;j<reader.maxDoc();j++) {
|
||||
if (delDocs == null || !delDocs.get(j)) {
|
||||
reader.document(j);
|
||||
reader.getTermFreqVectors(j);
|
||||
}
|
||||
}
|
||||
reader.close();
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure initial IOException, and then 2nd
|
||||
// IOException during rollback(), is OK:
|
||||
public void testIOExceptionDuringAbort() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure initial IOException, and then 2nd
|
||||
// IOException during rollback(), is OK:
|
||||
public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure initial IOException, and then 2nd
|
||||
// IOException during rollback(), with multiple threads, is OK:
|
||||
public void testIOExceptionDuringAbortWithThreads() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure initial IOException, and then 2nd
|
||||
// IOException during rollback(), with multiple threads, is OK:
|
||||
public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
|
||||
}
|
||||
|
||||
// Throws IOException during DocumentsWriter.closeDocStore
|
||||
private static class FailOnlyInCloseDocStore extends MockDirectoryWrapper.Failure {
|
||||
private boolean onlyOnce;
|
||||
public FailOnlyInCloseDocStore(boolean onlyOnce) {
|
||||
this.onlyOnce = onlyOnce;
|
||||
}
|
||||
@Override
|
||||
public void eval(MockDirectoryWrapper dir) throws IOException {
|
||||
if (doFail) {
|
||||
StackTraceElement[] trace = new Exception().getStackTrace();
|
||||
for (int i = 0; i < trace.length; i++) {
|
||||
if ("closeDocStore".equals(trace[i].getMethodName())) {
|
||||
if (onlyOnce)
|
||||
doFail = false;
|
||||
throw new IOException("now failing on purpose");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in closeDocStore
|
||||
public void testIOExceptionDuringCloseDocStore() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in closeDocStore
|
||||
public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in closeDocStore, with threads
|
||||
public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in closeDocStore, with threads
|
||||
public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
|
||||
}
|
||||
|
||||
// Throws IOException during DocumentsWriter.writeSegment
|
||||
private static class FailOnlyInWriteSegment extends MockDirectoryWrapper.Failure {
|
||||
private boolean onlyOnce;
|
||||
public FailOnlyInWriteSegment(boolean onlyOnce) {
|
||||
this.onlyOnce = onlyOnce;
|
||||
}
|
||||
@Override
|
||||
public void eval(MockDirectoryWrapper dir) throws IOException {
|
||||
if (doFail) {
|
||||
StackTraceElement[] trace = new Exception().getStackTrace();
|
||||
for (int i = 0; i < trace.length; i++) {
|
||||
if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
|
||||
if (onlyOnce)
|
||||
doFail = false;
|
||||
throw new IOException("now failing on purpose");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in writeSegment
|
||||
public void testIOExceptionDuringWriteSegment() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyInWriteSegment(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in writeSegment
|
||||
public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
|
||||
_testSingleThreadFailure(new FailOnlyInWriteSegment(true));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in writeSegment, with threads
|
||||
public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
|
||||
}
|
||||
|
||||
// LUCENE-1130: test IOException in writeSegment, with threads
|
||||
public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
|
||||
_testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
|
||||
}
|
||||
|
||||
// LUCENE-1084: test unlimited field length
|
||||
public void testUnlimitedMaxFieldLength() throws IOException {
|
||||

@@ -5250,65 +4465,4 @@
w.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private static class FailTwiceDuringMerge extends MockDirectoryWrapper.Failure {
|
||||
public boolean didFail1;
|
||||
public boolean didFail2;
|
||||
|
||||
@Override
|
||||
public void eval(MockDirectoryWrapper dir) throws IOException {
|
||||
if (!doFail) {
|
||||
return;
|
||||
}
|
||||
StackTraceElement[] trace = new Exception().getStackTrace();
|
||||
for (int i = 0; i < trace.length; i++) {
|
||||
if ("org.apache.lucene.index.SegmentMerger".equals(trace[i].getClassName()) && "mergeTerms".equals(trace[i].getMethodName()) && !didFail1) {
|
||||
didFail1 = true;
|
||||
throw new IOException("fake disk full during mergeTerms");
|
||||
}
|
||||
if ("org.apache.lucene.util.BitVector".equals(trace[i].getClassName()) && "write".equals(trace[i].getMethodName()) && !didFail2) {
|
||||
didFail2 = true;
|
||||
throw new IOException("fake disk full while writing BitVector");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-2593
|
||||
public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
//IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setReaderPooling(true));
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new SerialMergeScheduler()).setReaderPooling(true));
|
||||
|
||||
((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
|
||||
w.commit();
|
||||
|
||||
w.deleteDocuments(new Term("f", "who"));
|
||||
w.addDocument(doc);
|
||||
|
||||
// disk fills up!
|
||||
FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
|
||||
ftdm.setDoFail();
|
||||
dir.failOn(ftdm);
|
||||
|
||||
try {
|
||||
w.commit();
|
||||
fail("fake disk full IOExceptions not hit");
|
||||
} catch (IOException ioe) {
|
||||
// expected
|
||||
assertTrue(ftdm.didFail1);
|
||||
}
|
||||
_TestUtil.checkIndex(dir);
|
||||
ftdm.clearDoFail();
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
}
|

TestIndexWriterOnDiskFull.java (new file)
@@ -0,0 +1,525 @@
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;

import static org.apache.lucene.index.TestIndexWriter.assertNoUnreferencedFiles;

/**
 * Tests for IndexWriter when the disk runs out of space
 */
public class TestIndexWriterOnDiskFull extends LuceneTestCase {

/*
|
||||
* Make sure IndexWriter cleans up on hitting a disk
|
||||
* full exception in addDocument.
|
||||
* TODO: how to do this on windows with FSDirectory?
|
||||
*/
|
||||
public void testAddDocumentOnDiskFull() throws IOException {
|
||||
|
||||
for(int pass=0;pass<2;pass++) {
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: pass=" + pass);
|
||||
boolean doAbort = pass == 1;
|
||||
long diskFree = 200;
|
||||
while(true) {
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: cycle: diskFree=" + diskFree);
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
|
||||
dir.setMaxSizeInBytes(diskFree);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
MergeScheduler ms = writer.getConfig().getMergeScheduler();
|
||||
if (ms instanceof ConcurrentMergeScheduler)
|
||||
// This test intentionally produces exceptions
|
||||
// in the threads that CMS launches; we don't
|
||||
// want to pollute test output with these.
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
|
||||
boolean hitError = false;
|
||||
try {
|
||||
for(int i=0;i<200;i++) {
|
||||
addDoc(writer);
|
||||
}
|
||||
writer.commit();
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: exception on addDoc");
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
hitError = true;
|
||||
}
|
||||
|
||||
if (hitError) {
|
||||
if (doAbort) {
|
||||
writer.rollback();
|
||||
} else {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: exception on close");
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close();
|
||||
}
|
||||
}
|
||||
|
||||
//_TestUtil.syncConcurrentMerges(ms);
|
||||
|
||||
if (_TestUtil.anyFilesExceptWriteLock(dir)) {
|
||||
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
|
||||
|
||||
// Make sure reader can open the index:
|
||||
IndexReader.open(dir, true).close();
|
||||
}
|
||||
|
||||
dir.close();
|
||||
// Now try again w/ more space:
|
||||
|
||||
diskFree += 500;
|
||||
} else {
|
||||
//_TestUtil.syncConcurrentMerges(writer);
|
||||
writer.close();
|
||||
dir.close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Test: make sure when we run out of disk space or hit
|
||||
random IOExceptions in any of the addIndexes(*) calls
|
||||
that 1) index is not corrupt (searcher can open/search
|
||||
it) and 2) transactional semantics are followed:
|
||||
either all or none of the incoming documents were in
|
||||
fact added.
|
||||
*/
|
||||
public void testAddIndexOnDiskFull() throws IOException
|
||||
{
|
||||
int START_COUNT = 57;
|
||||
int NUM_DIR = 50;
|
||||
int END_COUNT = START_COUNT + NUM_DIR*25;
|
||||
|
||||
// Build up a bunch of dirs that have indexes which we
|
||||
// will then merge together by calling addIndexes(*):
|
||||
Directory[] dirs = new Directory[NUM_DIR];
|
||||
long inputDiskUsage = 0;
|
||||
for(int i=0;i<NUM_DIR;i++) {
|
||||
dirs[i] = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
for(int j=0;j<25;j++) {
|
||||
addDocWithIndex(writer, 25*i+j);
|
||||
}
|
||||
writer.close();
|
||||
String[] files = dirs[i].listAll();
|
||||
for(int j=0;j<files.length;j++) {
|
||||
inputDiskUsage += dirs[i].fileLength(files[j]);
|
||||
}
|
||||
}
|
||||
|
||||
// Now, build a starting index that has START_COUNT docs. We
|
||||
// will then try to addIndexesNoOptimize into a copy of this:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
for(int j=0;j<START_COUNT;j++) {
|
||||
addDocWithIndex(writer, j);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// Make sure starting index seems to be working properly:
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(startDir, true);
|
||||
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
|
||||
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 57, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// Iterate with larger and larger amounts of free
|
||||
// disk space. With little free disk space,
|
||||
// addIndexes will certainly run out of space &
|
||||
// fail. Verify that when this happens, index is
|
||||
// not corrupt and index in fact has added no
|
||||
// documents. Then, we increase disk space by 2000
|
||||
// bytes each iteration. At some point there is
|
||||
// enough free disk space and addIndexes should
|
||||
// succeed and index should show all documents were
|
||||
// added.
|
||||
|
||||
// String[] files = startDir.listAll();
|
||||
long diskUsage = startDir.sizeInBytes();
|
||||
|
||||
long startDiskUsage = 0;
|
||||
String[] files = startDir.listAll();
|
||||
for(int i=0;i<files.length;i++) {
|
||||
startDiskUsage += startDir.fileLength(files[i]);
|
||||
}
|
||||
|
||||
for(int iter=0;iter<3;iter++) {
|
||||
|
||||
if (VERBOSE)
|
||||
System.out.println("TEST: iter=" + iter);
|
||||
|
||||
// Start with 100 bytes more than we are currently using:
|
||||
long diskFree = diskUsage+100;
|
||||
|
||||
int method = iter;
|
||||
|
||||
boolean success = false;
|
||||
boolean done = false;
|
||||
|
||||
String methodName;
|
||||
if (0 == method) {
|
||||
methodName = "addIndexes(Directory[]) + optimize()";
|
||||
} else if (1 == method) {
|
||||
methodName = "addIndexes(IndexReader[])";
|
||||
} else {
|
||||
methodName = "addIndexes(Directory[])";
|
||||
}
|
||||
|
||||
while(!done) {
|
||||
|
||||
// Make a new dir that will enforce disk usage:
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory(startDir));
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
IOException err = null;
|
||||
|
||||
MergeScheduler ms = writer.getConfig().getMergeScheduler();
|
||||
for(int x=0;x<2;x++) {
|
||||
if (ms instanceof ConcurrentMergeScheduler)
|
||||
// This test intentionally produces exceptions
|
||||
// in the threads that CMS launches; we don't
|
||||
// want to pollute test output with these.
|
||||
if (0 == x)
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
else
|
||||
((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
|
||||
|
||||
// Two loops: first time, limit disk space &
|
||||
// throw random IOExceptions; second time, no
|
||||
// disk space limit:
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
|
||||
String testName = null;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE)
|
||||
testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE)
|
||||
testName = "disk full test " + methodName + " with unlimited disk space";
|
||||
}
|
||||
|
||||
if (VERBOSE)
|
||||
System.out.println("\ncycle: " + testName);
|
||||
|
||||
dir.setTrackDiskUsage(true);
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate, diskFree);
|
||||
|
||||
try {
|
||||
|
||||
if (0 == method) {
|
||||
writer.addIndexes(dirs);
|
||||
writer.optimize();
|
||||
} else if (1 == method) {
|
||||
IndexReader readers[] = new IndexReader[dirs.length];
|
||||
for(int i=0;i<dirs.length;i++) {
|
||||
readers[i] = IndexReader.open(dirs[i], true);
|
||||
}
|
||||
try {
|
||||
writer.addIndexes(readers);
|
||||
} finally {
|
||||
for(int i=0;i<dirs.length;i++) {
|
||||
readers[i].close();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
writer.addIndexes(dirs);
|
||||
}
|
||||
|
||||
success = true;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" success!");
|
||||
}
|
||||
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
|
||||
} catch (IOException e) {
|
||||
success = false;
|
||||
err = e;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
|
||||
if (1 == x) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(methodName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all threads from
|
||||
// ConcurrentMergeScheduler are done
|
||||
_TestUtil.syncConcurrentMerges(writer);
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" now test readers");
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs added, and if we
|
||||
// failed, we see either all docs or no docs added
|
||||
// (transactional semantics):
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(testName + ": exception when creating IndexReader: " + e);
|
||||
}
|
||||
int result = reader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != START_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace(System.out);
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher = new IndexSearcher(reader);
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != result) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != result) {
|
||||
err.printStackTrace(System.out);
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
reader.close();
|
||||
if (VERBOSE) {
|
||||
System.out.println(" count is " + result);
|
||||
}
|
||||
|
||||
if (done || result == END_COUNT) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.getMaxUsedSizeInBytes());
|
||||
}
|
||||
|
||||
if (done) {
|
||||
// Javadocs state that temp free Directory space
|
||||
// required is at most 2X total input size of
|
||||
// indices so let's make sure:
|
||||
assertTrue("max free Directory space required exceeded 1X the total input index sizes during " + methodName +
|
||||
": max temp usage = " + (dir.getMaxUsedSizeInBytes()-startDiskUsage) + " bytes; " +
|
||||
"starting disk usage = " + startDiskUsage + " bytes; " +
|
||||
"input index disk usage = " + inputDiskUsage + " bytes",
|
||||
(dir.getMaxUsedSizeInBytes()-startDiskUsage) < 2*(startDiskUsage + inputDiskUsage));
|
||||
}
|
||||
|
||||
// Make sure we don't hit disk full during close below:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
dir.setRandomIOExceptionRate(0.0, 0);
|
||||
|
||||
writer.close();
|
||||
|
||||
// Wait for all BG threads to finish else
|
||||
// dir.close() will throw IOException because
|
||||
// there are still open files
|
||||
_TestUtil.syncConcurrentMerges(ms);
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with 5000 more bytes of free space:
|
||||
diskFree += 5000;
|
||||
}
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
for (Directory dir : dirs)
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private static class FailTwiceDuringMerge extends MockDirectoryWrapper.Failure {
|
||||
public boolean didFail1;
|
||||
public boolean didFail2;
|
||||
|
||||
@Override
|
||||
public void eval(MockDirectoryWrapper dir) throws IOException {
|
||||
if (!doFail) {
|
||||
return;
|
||||
}
|
||||
StackTraceElement[] trace = new Exception().getStackTrace();
|
||||
for (int i = 0; i < trace.length; i++) {
|
||||
if ("org.apache.lucene.index.SegmentMerger".equals(trace[i].getClassName()) && "mergeTerms".equals(trace[i].getMethodName()) && !didFail1) {
|
||||
didFail1 = true;
|
||||
throw new IOException("fake disk full during mergeTerms");
|
||||
}
|
||||
if ("org.apache.lucene.util.BitVector".equals(trace[i].getClassName()) && "write".equals(trace[i].getMethodName()) && !didFail2) {
|
||||
didFail2 = true;
|
||||
throw new IOException("fake disk full while writing BitVector");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-2593
|
||||
public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
//IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setReaderPooling(true));
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new SerialMergeScheduler()).setReaderPooling(true));
|
||||
|
||||
((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
|
||||
w.commit();
|
||||
|
||||
w.deleteDocuments(new Term("f", "who"));
|
||||
w.addDocument(doc);
|
||||
|
||||
// disk fills up!
|
||||
FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
|
||||
ftdm.setDoFail();
|
||||
dir.failOn(ftdm);
|
||||
|
||||
try {
|
||||
w.commit();
|
||||
fail("fake disk full IOExceptions not hit");
|
||||
} catch (IOException ioe) {
|
||||
// expected
|
||||
assertTrue(ftdm.didFail1);
|
||||
}
|
||||
_TestUtil.checkIndex(dir);
|
||||
ftdm.clearDoFail();
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
_TestUtil.checkIndex(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure immediate disk full on creating
// an IndexWriter (hit during DW.ThreadState.init()) is
// OK:
public void testImmediateDiskFull() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
|
||||
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
|
||||
try {
|
||||
writer.addDocument(doc);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
// Without fix for LUCENE-1130: this call will hang:
|
||||
try {
|
||||
writer.addDocument(doc);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
try {
|
||||
writer.close(false);
|
||||
fail("did not hit disk full");
|
||||
} catch (IOException ioe) {
|
||||
}
|
||||
|
||||
// Make sure once disk space is avail again, we can
|
||||
// cleanly close:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close(false);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// TODO: these are also in TestIndexWriter... add a simple doc-writing method
|
||||
// like this to LuceneTestCase?
|
||||
private void addDoc(IndexWriter writer) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
|
||||
doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
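The fault-injection hook used by FailTwiceDuringMerge above, and by the Failure subclasses in the threaded tests that follow, is MockDirectoryWrapper.Failure: the wrapper invokes eval() during I/O, and the hook can throw once it sees a chosen method on the call stack. Below is a minimal sketch of writing and arming such a hook; the class name and the target method ("commit") are hypothetical, chosen only for illustration, and the fragment is meant to live inside a test class like the ones in this commit.

// Hypothetical example, following the Failure subclasses in this commit:
// throw a fake IOException the first time a chosen method appears on the stack.
private static class FailOnceInChosenMethod extends MockDirectoryWrapper.Failure {
  @Override
  public void eval(MockDirectoryWrapper dir) throws IOException {
    if (!doFail) {
      return;                                  // armed via setDoFail()/clearDoFail()
    }
    StackTraceElement[] trace = new Exception().getStackTrace();
    for (int i = 0; i < trace.length; i++) {
      if ("commit".equals(trace[i].getMethodName())) {
        doFail = false;                        // fire only once
        throw new IOException("now failing on purpose");
      }
    }
  }
}

// Arming it against a MockDirectoryWrapper, as the tests here do:
//   FailOnceInChosenMethod failure = new FailOnceInChosenMethod();
//   failure.setDoFail();
//   dir.failOn(failure);
//   ... run the operation expected to hit the injected IOException ...
//   failure.clearDoFail();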

TestIndexWriterWithThreads.java (new file)
@@ -0,0 +1,422 @@
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ThreadInterruptedException;

/**
 * MultiThreaded IndexWriter tests
 */
public class TestIndexWriterWithThreads extends LuceneTestCase {

// Used by test cases below
|
||||
private class IndexerThread extends Thread {
|
||||
|
||||
boolean diskFull;
|
||||
Throwable error;
|
||||
AlreadyClosedException ace;
|
||||
IndexWriter writer;
|
||||
boolean noErrors;
|
||||
volatile int addCount;
|
||||
|
||||
public IndexerThread(IndexWriter writer, boolean noErrors) {
|
||||
this.writer = writer;
|
||||
this.noErrors = noErrors;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
|
||||
|
||||
int idUpto = 0;
|
||||
int fullCount = 0;
|
||||
final long stopTime = System.currentTimeMillis() + 200;
|
||||
|
||||
do {
|
||||
try {
|
||||
writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
|
||||
addCount++;
|
||||
} catch (IOException ioe) {
|
||||
//System.out.println(Thread.currentThread().getName() + ": hit exc");
|
||||
//ioe.printStackTrace(System.out);
|
||||
if (ioe.getMessage().startsWith("fake disk full at") ||
|
||||
ioe.getMessage().equals("now failing on purpose")) {
|
||||
diskFull = true;
|
||||
try {
|
||||
Thread.sleep(1);
|
||||
} catch (InterruptedException ie) {
|
||||
throw new ThreadInterruptedException(ie);
|
||||
}
|
||||
if (fullCount++ >= 5)
|
||||
break;
|
||||
} else {
|
||||
if (noErrors) {
|
||||
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
|
||||
ioe.printStackTrace(System.out);
|
||||
error = ioe;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
//t.printStackTrace(System.out);
|
||||
if (noErrors) {
|
||||
System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
|
||||
t.printStackTrace(System.out);
|
||||
error = t;
|
||||
}
|
||||
break;
|
||||
}
|
||||
} while(System.currentTimeMillis() < stopTime);
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-1130: make sure immediate disk full on creating
|
||||
// an IndexWriter (hit during DW.ThreadState.init()), with
|
||||
// multiple threads, is OK:
|
||||
public void testImmediateDiskFullWithThreads() throws Exception {
|
||||
|
||||
int NUM_THREADS = 3;
|
||||
|
||||
for(int iter=0;iter<10;iter++) {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
|
||||
// We expect disk full exceptions in the merge threads
|
||||
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
dir.setMaxSizeInBytes(4*1024+20*iter);
|
||||
|
||||
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i] = new IndexerThread(writer, true);
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i].start();
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
// Without fix for LUCENE-1130: one of the
|
||||
// threads will hang
|
||||
threads[i].join();
|
||||
assertTrue("hit unexpected Throwable", threads[i].error == null);
|
||||
}
|
||||
|
||||
// Make sure once disk space is avail again, we can
|
||||
// cleanly close:
|
||||
dir.setMaxSizeInBytes(0);
|
||||
writer.close(false);
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// LUCENE-1130: make sure we can close() even while
// threads are trying to add documents. Strictly
// speaking, this isn't valid use of Lucene's APIs, but we
// still want to be robust to this case:
public void testCloseWithThreads() throws Exception {
|
||||
int NUM_THREADS = 3;
|
||||
|
||||
for(int iter=0;iter<7;iter++) {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler());
|
||||
// We expect AlreadyClosedException
|
||||
((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
|
||||
IndexerThread[] threads = new IndexerThread[NUM_THREADS];
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i] = new IndexerThread(writer, false);
|
||||
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
threads[i].start();
|
||||
|
||||
boolean done = false;
|
||||
while(!done) {
|
||||
Thread.sleep(100);
|
||||
for(int i=0;i<NUM_THREADS;i++)
|
||||
// only stop when at least one thread has added a doc
|
||||
if (threads[i].addCount > 0) {
|
||||
done = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
writer.close(false);
|
||||
|
||||
// Make sure threads that are adding docs are not hung:
|
||||
for(int i=0;i<NUM_THREADS;i++) {
|
||||
// Without fix for LUCENE-1130: one of the
|
||||
// threads will hang
|
||||
threads[i].join();
|
||||
if (threads[i].isAlive())
|
||||
fail("thread seems to be hung");
|
||||
}
|
||||
|
||||
// Quick test to make sure index is not corrupt:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
|
||||
MultiFields.getDeletedDocs(reader),
|
||||
"field",
|
||||
new BytesRef("aaa"));
|
||||
int count = 0;
|
||||
while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
|
||||
count++;
|
||||
}
|
||||
assertTrue(count > 0);
|
||||
reader.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
  // Runs test, with multiple threads, using the specific
  // failure to trigger an IOException
  public void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure) throws Exception {

    int NUM_THREADS = 3;

    for(int iter=0;iter<2;iter++) {
      MockDirectoryWrapper dir = newDirectory();
      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT,
          new MockAnalyzer()).setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
      // We expect disk full exceptions in the merge threads
      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
      IndexWriter writer = new IndexWriter(dir, conf);
      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);

      IndexerThread[] threads = new IndexerThread[NUM_THREADS];

      for(int i=0;i<NUM_THREADS;i++)
        threads[i] = new IndexerThread(writer, true);

      for(int i=0;i<NUM_THREADS;i++)
        threads[i].start();

      Thread.sleep(10);

      dir.failOn(failure);
      failure.setDoFail();

      for(int i=0;i<NUM_THREADS;i++) {
        threads[i].join();
        assertTrue("hit unexpected Throwable", threads[i].error == null);
      }

      boolean success = false;
      try {
        writer.close(false);
        success = true;
      } catch (IOException ioe) {
        // the injected failure hit during close; disable it and close again
        failure.clearDoFail();
        writer.close(false);
      }

      if (success) {
        // if close succeeded, verify every non-deleted doc is still readable
        IndexReader reader = IndexReader.open(dir, true);
        final Bits delDocs = MultiFields.getDeletedDocs(reader);
        for(int j=0;j<reader.maxDoc();j++) {
          if (delDocs == null || !delDocs.get(j)) {
            reader.document(j);
            reader.getTermFreqVectors(j);
          }
        }
        reader.close();
      }

      dir.close();
    }
  }

  // Runs test, with one thread, using the specific failure
  // to trigger an IOException
  public void _testSingleThreadFailure(MockDirectoryWrapper.Failure failure) throws IOException {
    MockDirectoryWrapper dir = newDirectory();

    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
    final Document doc = new Document();
    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

    for(int i=0;i<6;i++)
      writer.addDocument(doc);

    dir.failOn(failure);
    failure.setDoFail();
    try {
      writer.addDocument(doc);
      writer.addDocument(doc);
      writer.commit();
      fail("did not hit exception");
    } catch (IOException ioe) {
      // expected: the injected failure fired during add/commit
    }
    failure.clearDoFail();
    writer.addDocument(doc);
    writer.close(false);
    dir.close();
  }

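  // Illustrative sketch only (the matched method name is hypothetical, not one
  // of the real trigger points used below): the failure classes that follow
  // all use the same hook.  MockDirectoryWrapper calls eval() on every
  // registered Failure for each file operation, and the Failure walks the
  // current stack trace to decide when to throw; it is armed and cleared via
  // dir.failOn(failure) and setDoFail()/clearDoFail() as shown in the two
  // drivers above.
  private static class FailOnlyInSomeMethod extends MockDirectoryWrapper.Failure {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("someMethod".equals(trace[i].getMethodName())) {
            doFail = false;   // fire only once
            throw new IOException("now failing on purpose");
          }
        }
      }
    }
  }
  // such a failure would be paired with the single-threaded driver above:
  //   _testSingleThreadFailure(new FailOnlyInSomeMethod());
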
  // Throws IOException during FieldsWriter.flushDocument and during DocumentsWriter.abort
  private static class FailOnlyOnAbortOrFlush extends MockDirectoryWrapper.Failure {
    private boolean onlyOnce;
    public FailOnlyOnAbortOrFlush(boolean onlyOnce) {
      this.onlyOnce = onlyOnce;
    }
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("abort".equals(trace[i].getMethodName()) ||
              "flushDocument".equals(trace[i].getMethodName())) {
            if (onlyOnce)
              doFail = false;
            //System.out.println(Thread.currentThread().getName() + ": now fail");
            //new Throwable().printStackTrace(System.out);
            throw new IOException("now failing on purpose");
          }
        }
      }
    }
  }

  // LUCENE-1130: make sure initial IOException, and then 2nd
  // IOException during rollback(), is OK:
  public void testIOExceptionDuringAbort() throws IOException {
    _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(false));
  }

  // LUCENE-1130: make sure initial IOException, and then 2nd
  // IOException during rollback(), is OK:
  public void testIOExceptionDuringAbortOnlyOnce() throws IOException {
    _testSingleThreadFailure(new FailOnlyOnAbortOrFlush(true));
  }

  // LUCENE-1130: make sure initial IOException, and then 2nd
  // IOException during rollback(), with multiple threads, is OK:
  public void testIOExceptionDuringAbortWithThreads() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(false));
  }

  // LUCENE-1130: make sure initial IOException, and then 2nd
  // IOException during rollback(), with multiple threads, is OK:
  public void testIOExceptionDuringAbortWithThreadsOnlyOnce() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyOnAbortOrFlush(true));
  }

  // Throws IOException during DocumentsWriter.closeDocStore
  private static class FailOnlyInCloseDocStore extends MockDirectoryWrapper.Failure {
    private boolean onlyOnce;
    public FailOnlyInCloseDocStore(boolean onlyOnce) {
      this.onlyOnce = onlyOnce;
    }
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("closeDocStore".equals(trace[i].getMethodName())) {
            if (onlyOnce)
              doFail = false;
            throw new IOException("now failing on purpose");
          }
        }
      }
    }
  }

  // LUCENE-1130: test IOException in closeDocStore
  public void testIOExceptionDuringCloseDocStore() throws IOException {
    _testSingleThreadFailure(new FailOnlyInCloseDocStore(false));
  }

  // LUCENE-1130: test IOException in closeDocStore
  public void testIOExceptionDuringCloseDocStoreOnlyOnce() throws IOException {
    _testSingleThreadFailure(new FailOnlyInCloseDocStore(true));
  }

  // LUCENE-1130: test IOException in closeDocStore, with threads
  public void testIOExceptionDuringCloseDocStoreWithThreads() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(false));
  }

  // LUCENE-1130: test IOException in closeDocStore, with threads
  public void testIOExceptionDuringCloseDocStoreWithThreadsOnlyOnce() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyInCloseDocStore(true));
  }

  // Throws IOException during DocumentsWriter.writeSegment
  private static class FailOnlyInWriteSegment extends MockDirectoryWrapper.Failure {
    private boolean onlyOnce;
    public FailOnlyInWriteSegment(boolean onlyOnce) {
      this.onlyOnce = onlyOnce;
    }
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
      if (doFail) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("flush".equals(trace[i].getMethodName()) && "org.apache.lucene.index.DocFieldProcessor".equals(trace[i].getClassName())) {
            if (onlyOnce)
              doFail = false;
            throw new IOException("now failing on purpose");
          }
        }
      }
    }
  }

  // LUCENE-1130: test IOException in writeSegment
  public void testIOExceptionDuringWriteSegment() throws IOException {
    _testSingleThreadFailure(new FailOnlyInWriteSegment(false));
  }

  // LUCENE-1130: test IOException in writeSegment
  public void testIOExceptionDuringWriteSegmentOnlyOnce() throws IOException {
    _testSingleThreadFailure(new FailOnlyInWriteSegment(true));
  }

  // LUCENE-1130: test IOException in writeSegment, with threads
  public void testIOExceptionDuringWriteSegmentWithThreads() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyInWriteSegment(false));
  }

  // LUCENE-1130: test IOException in writeSegment, with threads
  public void testIOExceptionDuringWriteSegmentWithThreadsOnlyOnce() throws Exception {
    _testMultipleThreadsFailure(new FailOnlyInWriteSegment(true));
  }
}