Fix failures in TestPerFieldConsistency (#125)

This test assumes that there is no merging,
and was failing when merges occurred.
This fixes the test by setting NoMergePolicy
on the IndexWriter.

Relates to LUCENE-9334
Relates to #11
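
For illustration only (not part of this commit), here is a minimal standalone sketch of the idea behind the fix: installing NoMergePolicy.INSTANCE on the IndexWriterConfig disables merging, so every flush leaves exactly one new segment behind and the segment (leaf) count stays deterministic. The class name NoMergePolicyDemo, the "id" field, and ByteBuffersDirectory are assumptions made for this sketch; the test itself uses LuceneTestCase's newDirectory().

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class NoMergePolicyDemo {
  public static void main(String[] args) throws IOException {
    try (Directory dir = new ByteBuffersDirectory();
        IndexWriter writer =
            new IndexWriter(
                dir, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
      // Each flush writes its own segment, and NoMergePolicy guarantees
      // those segments are never merged afterwards.
      writer.addDocument(doc("1"));
      writer.flush();
      writer.addDocument(doc("2"));
      writer.flush();
      try (IndexReader reader = DirectoryReader.open(writer)) {
        // Two flushes and no merging -> exactly two leaves (segments).
        System.out.println("leaves: " + reader.leaves().size()); // expected: 2
      }
    }
  }

  private static Document doc(String id) {
    Document d = new Document();
    d.add(new StringField("id", id, Field.Store.NO));
    return d;
  }
}
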
Mayya Sharipova 2021-05-04 09:51:55 -04:00 committed by GitHub
parent c33d211d2a
commit b5a77de512
1 changed file with 11 additions and 2 deletions


@@ -31,6 +31,7 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.VectorValues;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -132,7 +133,9 @@ public class TestPerFieldConsistency extends LuceneTestCase {
 
   public void testDocWithMissingSchemaOptionsThrowsError() throws IOException {
     try (Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig()); ) {
+        IndexWriter writer =
+            new IndexWriter(
+                dir, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)); ) {
       final Field[] fields = randomFieldsWithTheSameName("myfield");
       final Document doc0 = new Document();
       for (Field field : fields) {
@@ -154,6 +157,7 @@ public class TestPerFieldConsistency extends LuceneTestCase {
       }
       writer.flush();
       try (IndexReader reader = DirectoryReader.open(writer)) {
+        assertEquals(1, reader.leaves().size());
         assertEquals(1, reader.leaves().get(0).reader().numDocs());
         assertEquals(numNotIndexedDocs, reader.leaves().get(0).reader().numDeletedDocs());
       }
@@ -168,6 +172,7 @@ public class TestPerFieldConsistency extends LuceneTestCase {
       writer.addDocument(doc0); // add document with correct data structures
       writer.flush();
       try (IndexReader reader = DirectoryReader.open(writer)) {
+        assertEquals(2, reader.leaves().size());
         assertEquals(1, reader.leaves().get(1).reader().numDocs());
         assertEquals(numNotIndexedDocs, reader.leaves().get(1).reader().numDeletedDocs());
       }
@@ -176,7 +181,9 @@ public class TestPerFieldConsistency extends LuceneTestCase {
 
   public void testDocWithExtraSchemaOptionsThrowsError() throws IOException {
     try (Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
+        IndexWriter writer =
+            new IndexWriter(
+                dir, new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)); ) {
       final Field[] fields = randomFieldsWithTheSameName("myfield");
       final Document doc0 = new Document();
       int existingFieldIdx = randomIntBetween(0, fields.length - 1);
@@ -198,6 +205,7 @@ public class TestPerFieldConsistency extends LuceneTestCase {
       }
       writer.flush();
       try (IndexReader reader = DirectoryReader.open(writer)) {
+        assertEquals(1, reader.leaves().size());
         assertEquals(1, reader.leaves().get(0).reader().numDocs());
         assertEquals(numNotIndexedDocs, reader.leaves().get(0).reader().numDeletedDocs());
       }
@@ -216,6 +224,7 @@ public class TestPerFieldConsistency extends LuceneTestCase {
       writer.addDocument(doc0); // add document with correct data structures
       writer.flush();
       try (IndexReader reader = DirectoryReader.open(writer)) {
+        assertEquals(2, reader.leaves().size());
         assertEquals(1, reader.leaves().get(1).reader().numDocs());
         assertEquals(numNotIndexedDocs, reader.leaves().get(1).reader().numDeletedDocs());
       }