LUCENE-3606: Die - IR.deleteDocument*(), IR.undeleteAll() - die, die, die!!!

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3606@1210153 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2011-12-04 17:35:15 +00:00
parent 04f8f2850a
commit e0b5b22cd5
31 changed files with 90 additions and 1514 deletions

View File: MemoryIndex.java

@@ -1137,16 +1137,6 @@ public class MemoryIndex {
return false;
}
@Override
protected void doDelete(int docNum) {
throw new UnsupportedOperationException();
}
@Override
protected void doUndeleteAll() {
throw new UnsupportedOperationException();
}
@Override
protected void doCommit(Map<String,String> commitUserData) {
if (DEBUG) System.err.println("MemoryIndexReader.doCommit");

View File: MultiPassIndexSplitter.java

@@ -57,18 +57,18 @@ public class MultiPassIndexSplitter {
* assigned in a deterministic round-robin fashion to one of the output splits.
* @throws IOException
*/
public void split(Version version, IndexReader input, Directory[] outputs, boolean seq) throws IOException {
public void split(Version version, IndexReader in, Directory[] outputs, boolean seq) throws IOException {
if (outputs == null || outputs.length < 2) {
throw new IOException("Invalid number of outputs.");
}
if (input == null || input.numDocs() < 2) {
if (in == null || in.numDocs() < 2) {
throw new IOException("Not enough documents for splitting");
}
int numParts = outputs.length;
// wrap a potentially read-only input
// this way we don't have to preserve original deletions because neither
// deleteDocument(int) or undeleteAll() is applied to the wrapped input index.
input = new FakeDeleteIndexReader(input);
FakeDeleteIndexReader input = new FakeDeleteIndexReader(in);
int maxDoc = input.maxDoc();
int partLen = maxDoc / numParts;
for (int i = 0; i < numParts; i++) {
@@ -183,7 +183,7 @@ public class MultiPassIndexSplitter {
public FakeDeleteIndexReader(IndexReader in) {
super(new SlowMultiReaderWrapper(in));
doUndeleteAll(); // initialize main bitset
undeleteAll(); // initialize main bitset
}
@Override
@@ -191,12 +191,7 @@ public class MultiPassIndexSplitter {
return liveDocs.cardinality();
}
/**
* Just removes our overlaid deletions - does not undelete the original
* deletions.
*/
@Override
protected void doUndeleteAll() {
void undeleteAll() {
final int maxDoc = in.maxDoc();
liveDocs = new FixedBitSet(in.maxDoc());
if (in.hasDeletions()) {
@@ -212,8 +207,7 @@ public class MultiPassIndexSplitter {
}
}
@Override
protected void doDelete(int n) {
void deleteDocument(int n) {
liveDocs.clear(n);
}
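For readers following the API change: FakeDeleteIndexReader now carries its own package-private deleteDocument/undeleteAll instead of overriding the removed IndexReader hooks. A minimal sketch of the overlay pattern, with illustrative names (OverlayDeleteReader is not part of this patch):

import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.FixedBitSet;

// Sketch only: deletions live in a private FixedBitSet and never reach the
// wrapped index, so the original index needs no write access at all.
class OverlayDeleteReader extends FilterIndexReader {
  FixedBitSet liveDocs;

  OverlayDeleteReader(IndexReader in) {
    super(in);
    undeleteAll(); // build the initial bitset from the wrapped reader
  }

  void undeleteAll() {
    // Mark every doc live, then (as the real code above does) re-apply the
    // wrapped reader's own, permanent deletions.
    liveDocs = new FixedBitSet(in.maxDoc());
    liveDocs.set(0, in.maxDoc());
  }

  void deleteDocument(int n) {
    liveDocs.clear(n); // overlay-only: clearing a bit deletes the doc here
  }

  @Override
  public int numDocs() {
    return liveDocs.cardinality();
  }
}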

View File: TestIndexSplitter.java

@@ -94,63 +94,4 @@ public class TestIndexSplitter extends LuceneTestCase {
fsDir.close();
}
public void testDeleteThenFullMerge() throws Exception {
// Create directories where the indexes will reside
File indexPath = new File(TEMP_DIR, "testfilesplitter");
_TestUtil.rmDir(indexPath);
indexPath.mkdirs();
File indexSplitPath = new File(TEMP_DIR, "testfilesplitterdest");
_TestUtil.rmDir(indexSplitPath);
indexSplitPath.mkdirs();
// Create the original index
LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setNoCFSRatio(1);
IndexWriterConfig iwConfig
= new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE)
.setMergePolicy(mergePolicy);
Directory fsDir = newFSDirectory(indexPath);
IndexWriter indexWriter = new IndexWriter(fsDir, iwConfig);
Document doc = new Document();
doc.add(new Field("content", "doc 1", StringField.TYPE_STORED));
indexWriter.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "doc 2", StringField.TYPE_STORED));
indexWriter.addDocument(doc);
indexWriter.close();
fsDir.close();
// Create the split index
IndexSplitter indexSplitter = new IndexSplitter(indexPath);
String splitSegName = indexSplitter.infos.info(0).name;
indexSplitter.split(indexSplitPath, new String[] {splitSegName});
// Delete the first document in the split index
Directory fsDirDest = newFSDirectory(indexSplitPath);
IndexReader indexReader = IndexReader.open(fsDirDest, false);
indexReader.deleteDocument(0);
assertEquals(1, indexReader.numDocs());
indexReader.close();
fsDirDest.close();
// Fully merge the split index
mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setNoCFSRatio(1);
iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND)
.setMergePolicy(mergePolicy);
fsDirDest = newFSDirectory(indexSplitPath);
indexWriter = new IndexWriter(fsDirDest, iwConfig);
indexWriter.forceMerge(1);
indexWriter.close();
fsDirDest.close();
// Read the number of docs in the index
fsDirDest = newFSDirectory(indexSplitPath);
indexReader = IndexReader.open(fsDirDest);
assertEquals(1, indexReader.numDocs());
indexReader.close();
fsDirDest.close();
}
}
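
The deleted testDeleteThenFullMerge relied on reader-side deletes; the invariant it checked (a full merge physically drops deleted documents) still holds with writer-side deletes. A minimal post-patch sketch, assuming dir and iwConfig are set up as in the removed test:

// Delete through the writer, then force-merge: merged segments contain no
// deleted documents, so numDocs catches up with maxDoc.
IndexWriter writer = new IndexWriter(dir, iwConfig);
writer.deleteDocuments(new Term("content", "doc 1")); // buffered, applied on flush
writer.forceMerge(1);
writer.close();

IndexReader reader = IndexReader.open(dir);
assertEquals(reader.maxDoc(), reader.numDocs()); // nothing left marked deleted
reader.close();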

View File: TestMultiPassIndexSplitter.java

@@ -41,10 +41,10 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
doc.add(newField("f", i + " " + i, TextField.TYPE_STORED));
w.addDocument(doc);
}
w.commit();
w.deleteDocuments(new Term("id", "" + (NUM_DOCS-1)));
w.close();
input = IndexReader.open(dir, false);
// delete the last doc
input.deleteDocument(input.maxDoc() - 1);
input = IndexReader.open(dir);
}
@Override
@@ -66,7 +66,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, false);
IndexReader ir;
ir = IndexReader.open(dirs[0], true);
ir = IndexReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));
@@ -74,7 +74,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term().utf8ToString());
ir.close();
ir = IndexReader.open(dirs[1], true);
ir = IndexReader.open(dirs[1]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("1", doc.get("id"));
@@ -83,7 +83,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
assertNotSame("0", te.term().utf8ToString());
ir.close();
ir = IndexReader.open(dirs[2], true);
ir = IndexReader.open(dirs[2]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("2", doc.get("id"));
@@ -111,19 +111,19 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, true);
IndexReader ir;
ir = IndexReader.open(dirs[0], true);
ir = IndexReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));
int start = ir.numDocs();
ir.close();
ir = IndexReader.open(dirs[1], true);
ir = IndexReader.open(dirs[1]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));
start += ir.numDocs();
ir.close();
ir = IndexReader.open(dirs[2], true);
ir = IndexReader.open(dirs[2]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));

View File: DirectoryReader.java

@@ -535,23 +535,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return readerIndex(n, this.starts, this.subReaders.length);
}

View File: FilterIndexReader.java

@@ -277,8 +277,6 @@ public class FilterIndexReader extends IndexReader {
/**
* <p>Construct a FilterIndexReader based on the specified base reader.
* Directory locking for delete, undeleteAll operations is
* left to the base reader.</p>
* <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
* @param in specified base reader.
*/
@@ -331,9 +329,6 @@ public class FilterIndexReader extends IndexReader {
return in.hasDeletions();
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
@@ -358,9 +353,6 @@ public class FilterIndexReader extends IndexReader {
return in.docFreq(field, t);
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException { in.deleteDocument(n); }
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
in.commit(commitUserData);

View File: IndexReader.java

@@ -52,9 +52,6 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
as documents are added to and deleted from an index. Clients should thus not
rely on a given document having the same number between sessions.
<p> An IndexReader can be opened on a directory for which an IndexWriter is
opened already, but it cannot be used to delete documents from the index then.
<p>
<b>NOTE</b>: for backwards API compatibility, several methods are not listed
as abstract, but have no useful implementations in this base class and
@@ -1076,98 +1073,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
return null;
}
/** Deletes the document numbered <code>docNum</code>. Once a document is
* deleted it will not appear in TermDocs or TermPositions enumerations.
* Attempts to read its field with the {@link #document}
* method will result in an error. The presence of this document may still be
* reflected in the {@link #docFreq} statistic, though
* this will be corrected eventually as the index is further modified.
*
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if there is a low-level IO error
*/
public synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
acquireWriteLock();
hasChanges = true;
doDelete(docNum);
}
/** Implements deletion of the document numbered <code>docNum</code>.
* Applications should call {@link #deleteDocument(int)} or {@link #deleteDocuments(Term)}.
*/
protected abstract void doDelete(int docNum) throws CorruptIndexException, IOException;
/** Deletes all documents that have a given <code>term</code> indexed.
* This is useful if one uses a document field to hold a unique ID string for
* the document. Then to delete such a document, one merely constructs a
* term with the appropriate field and the unique ID string as its text and
* passes it to this method.
* See {@link #deleteDocument(int)} for information about when this deletion will
* become effective.
*
* @return the number of documents deleted
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if there is a low-level IO error
*/
public int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
DocsEnum docs = MultiFields.getTermDocsEnum(this,
MultiFields.getLiveDocs(this),
term.field(),
term.bytes());
if (docs == null) return 0;
int n = 0;
int doc;
while ((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
deleteDocument(doc);
n++;
}
return n;
}
/** Undeletes all documents currently marked as deleted in
* this index.
*
* <p>NOTE: this method can only recover documents marked
* for deletion but not yet removed from the index; when
* and how Lucene removes deleted documents is an
* implementation detail, subject to change from release
* to release. However, you can use {@link
* #numDeletedDocs} on the current IndexReader instance to
* see how many documents will be un-deleted.
*
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
acquireWriteLock();
hasChanges = true;
doUndeleteAll();
}
/** Implements actual undeleteAll() in subclass. */
protected abstract void doUndeleteAll() throws CorruptIndexException, IOException;
/** Does nothing by default. Subclasses that require a write lock for
* index modifications must implement this method. */
protected synchronized void acquireWriteLock() throws IOException {
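
The block above removes the last reader-side mutators. The replacement path is IndexWriter, which already supports delete-by-Term and delete-by-Query; delete-by-docID has no direct writer equivalent here. A hedged before/after sketch (dir and conf assumed):

// Before this patch (now gone): a read/write reader took the write lock itself.
//   IndexReader r = IndexReader.open(dir, false);
//   r.deleteDocuments(new Term("id", "7"));
//   r.close(); // committing wrote a new .del file
// After: all mutation flows through IndexWriter; readers are read-only views.
IndexWriter writer = new IndexWriter(dir, conf);
writer.deleteDocuments(new Term("id", "7"));                // by term
writer.deleteDocuments(new TermQuery(new Term("id", "8"))); // or by query
writer.close();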

View File: MultiReader.java

@@ -41,8 +41,6 @@ public class MultiReader extends IndexReader implements Cloneable {
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll operations is
* left to the subreaders. </p>
* <p>Note that all subreaders are closed if this Multireader is closed.</p>
* @param subReaders set of (sub)readers
*/
@@ -52,8 +50,6 @@ public class MultiReader extends IndexReader implements Cloneable {
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll operations is
* left to the subreaders. </p>
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
@@ -243,23 +239,6 @@ public class MultiReader extends IndexReader implements Cloneable {
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
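
DirectoryReader and MultiReader lose the same dispatch code. The interesting piece is readerIndex(n): a global docID is mapped to a subreader via the starts[] doc-base array, then offset to a segment-local id. A sketch of that mapping in the style of ReaderUtil.subIndex, with illustrative names and a sentinel starts[numSubs] == maxDoc assumed:

// Binary search over doc bases: returns i such that
// starts[i] <= docID < starts[i + 1].
static int subIndex(int docID, int[] starts) {
  int lo = 0, hi = starts.length - 2;
  while (lo <= hi) {
    int mid = (lo + hi) >>> 1;
    if (docID < starts[mid]) {
      hi = mid - 1;
    } else if (docID >= starts[mid + 1]) {
      lo = mid + 1;
    } else {
      return mid;
    }
  }
  return -1; // out of range
}
// The removed doDelete(n) then dispatched:
//   subReaders[i].deleteDocument(n - starts[i]);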

View File: ParallelReader.java

@@ -332,24 +332,6 @@ public class ParallelReader extends IndexReader {
return hasDeletions;
}
// delete in all readers
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.deleteDocument(n);
}
hasDeletions = true;
}
// undeleteAll in all readers
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.undeleteAll();
}
hasDeletions = false;
}
@Override
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
ensureOpen();

View File: SegmentReader.java

@@ -183,6 +183,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
* @param bv BitVector to clone
* @return New BitVector
*/
// nocommit: remove deletions from SR
protected BitVector cloneDeletedDocs(BitVector bv) {
ensureOpen();
return (BitVector)bv.clone();
@@ -321,6 +322,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
// nocommit: remove deletions from SR
private synchronized void commitChanges(Map<String,String> commitUserData) throws IOException {
if (liveDocsDirty) { // re-write deleted
si.advanceDelGen();
@@ -399,8 +401,16 @@ public class SegmentReader extends IndexReader implements Cloneable {
return si.hasSeparateNorms();
}
@Override
protected void doDelete(int docNum) {
// nocommit: remove deletions from SR
synchronized void deleteDocument(int docNum) throws IOException {
ensureOpen();
acquireWriteLock();
hasChanges = true;
doDelete(docNum);
}
// nocommit: remove deletions from SR
void doDelete(int docNum) {
if (liveDocs == null) {
liveDocs = new BitVector(maxDoc());
liveDocs.setAll();
@@ -421,23 +431,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
@Override
protected void doUndeleteAll() {
liveDocsDirty = false;
if (liveDocs != null) {
assert liveDocsRef != null;
liveDocsRef.decrementAndGet();
liveDocs = null;
liveDocsRef = null;
pendingDeleteCount = 0;
si.clearDelGen();
si.setDelCount(0);
} else {
assert liveDocsRef == null;
assert pendingDeleteCount == 0;
}
}
List<String> files() throws IOException {
return new ArrayList<String>(si.files());
}
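
SegmentReader keeps the per-segment deletion state for now (the nocommit markers flag it for later removal): a BitVector with one bit per document, shared through liveDocsRef so clones can copy-on-write. A sketch of the delete path shown above, filling in the bookkeeping implied by the removed doUndeleteAll (pendingDeleteCount, liveDocsDirty); BitVector's exact methods are assumed from the surrounding hunks:

// First delete allocates the vector with every doc live.
if (liveDocs == null) {
  liveDocs = new BitVector(maxDoc());
  liveDocs.setAll();
}
// Clearing a live bit marks the doc deleted; the pending count is flushed
// as a .del file when commitChanges() runs.
if (liveDocs.get(docNum)) {
  liveDocs.clear(docNum);
  pendingDeleteCount++;
  liveDocsDirty = true;
}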

View File: TestBinaryDocument.java

@@ -70,13 +70,6 @@ public class TestBinaryDocument extends LuceneTestCase {
assertTrue(stringFldStoredTest.equals(binaryValStored));
writer.close();
reader.close();
reader = IndexReader.open(dir, false);
/** delete the document from index */
reader.deleteDocument(0);
assertEquals(0, reader.numDocs());
reader.close();
dir.close();
}

View File: TestAddIndexes.java

@@ -412,6 +412,7 @@ public class TestAddIndexes extends LuceneTestCase {
aux.close();
}
/* nocommit: reactivate these tests
// case 4: tail segments, invariants hold, copy, invariants not hold
public void testMergeAfterCopy() throws IOException {
// main directory
@@ -496,6 +497,7 @@ public class TestAddIndexes extends LuceneTestCase {
aux.close();
aux2.close();
}
*/
private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
throws IOException {

View File: TestBackwardsCompatibility.java

@@ -459,13 +459,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
addNoProxDoc(writer);
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir, false);
writer = new IndexWriter(dir,
conf.setMergePolicy(doCFS ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES)
);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
reader.close();
writer.deleteDocuments(searchTerm);
writer.close();
}
dir.close();
@@ -501,12 +500,14 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir, false);
writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
reader.close();
writer.deleteDocuments(searchTerm);
writer.close();
// Now verify file names:
String[] expected = new String[] {"_0.cfs", "_0.cfe",

View File: TestCheckIndex.java

@@ -36,20 +36,19 @@ public class TestCheckIndex extends LuceneTestCase {
public void testDeletedDocs() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setStoreTermVectors(true);
customType.setStoreTermVectorPositions(true);
customType.setStoreTermVectorOffsets(true);
doc.add(newField("field", "aaa", customType));
for(int i=0;i<19;i++) {
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
customType.setStoreTermVectors(true);
customType.setStoreTermVectorPositions(true);
customType.setStoreTermVectorOffsets(true);
doc.add(newField("field", "aaa"+i, customType));
writer.addDocument(doc);
}
writer.forceMerge(1);
writer.commit();
writer.deleteDocuments(new Term("field","aaa5"));
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(5);
reader.close();
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
CheckIndex checker = new CheckIndex(dir);
@@ -73,7 +72,7 @@ public class TestCheckIndex extends LuceneTestCase {
assertNotNull(seg.termIndexStatus);
assertNull(seg.termIndexStatus.error);
assertEquals(1, seg.termIndexStatus.termCount);
assertEquals(19, seg.termIndexStatus.termCount);
assertEquals(19, seg.termIndexStatus.totFreq);
assertEquals(18, seg.termIndexStatus.totPos);

View File: TestCrash.java

@@ -155,52 +155,4 @@ public class TestCrash extends LuceneTestCase {
reader.close();
dir.close();
}
public void testCrashReaderDeletes() throws IOException {
IndexWriter writer = initIndex(random, false);
MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
writer.close(false);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(3);
dir.crash();
/*
String[] l = dir.list();
Arrays.sort(l);
for(int i=0;i<l.length;i++)
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
reader = IndexReader.open(dir, false);
assertEquals(157, reader.numDocs());
reader.close();
dir.clearCrash();
dir.close();
}
public void testCrashReaderDeletesAfterClose() throws IOException {
IndexWriter writer = initIndex(random, false);
MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
writer.close(false);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(3);
reader.close();
dir.crash();
/*
String[] l = dir.list();
Arrays.sort(l);
for(int i=0;i<l.length;i++)
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
reader = IndexReader.open(dir, false);
assertEquals(156, reader.numDocs());
reader.close();
dir.close();
}
}

View File: TestDeletionPolicy.java

@@ -603,142 +603,11 @@ public class TestDeletionPolicy extends LuceneTestCase {
}
}
/*
* Test a deletion policy that keeps last N commits
* around, with reader doing deletes.
*/
public void testKeepLastNDeletionPolicyWithReader() throws IOException {
final int N = 10;
for(int pass=0;pass<2;pass++) {
if (VERBOSE) {
System.out.println("TEST: pass=" + pass);
}
boolean useCompoundFile = (pass % 2) != 0;
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
MergePolicy mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
}
IndexWriter writer = new IndexWriter(dir, conf);
writer.close();
Term searchTerm = new Term("content", "aaa");
Query query = new TermQuery(searchTerm);
for(int i=0;i<N+1;i++) {
if (VERBOSE) {
System.out.println("\nTEST: write i=" + i);
}
conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
}
writer = new IndexWriter(dir, conf);
for(int j=0;j<17;j++) {
addDoc(writer);
}
// this is a commit
if (VERBOSE) {
System.out.println("TEST: close writer");
}
writer.close();
IndexReader reader = IndexReader.open(dir, policy, false);
reader.deleteDocument(3*i+1);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(16*(1+i), hits.length);
// this is a commit
if (VERBOSE) {
System.out.println("TEST: close reader numOnCommit=" + policy.numOnCommit);
}
reader.close();
searcher.close();
}
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
}
IndexReader r = IndexReader.open(dir);
final boolean wasFullyMerged = r.getSequentialSubReaders().length == 1 && !r.hasDeletions();
r.close();
writer = new IndexWriter(dir, conf);
writer.forceMerge(1);
// this is a commit
writer.close();
assertEquals(2*(N+1)+1, policy.numOnInit);
assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit);
IndexReader rwReader = IndexReader.open(dir, false);
IndexSearcher searcher = new IndexSearcher(rwReader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(176, hits.length);
// Simplistic check: just verify only the past N segments_N's still
// exist, and, I can open a reader on each:
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
int expectedCount = 176;
searcher.close();
rwReader.close();
for(int i=0;i<N+1;i++) {
if (VERBOSE) {
System.out.println("TEST: i=" + i);
}
try {
IndexReader reader = IndexReader.open(dir, true);
if (VERBOSE) {
System.out.println(" got reader=" + reader);
}
// Work backwards in commits on what the expected
// count should be.
searcher = newSearcher(reader);
hits = searcher.search(query, null, 1000).scoreDocs;
if (i > 1) {
if (i % 2 == 0) {
expectedCount += 1;
} else {
expectedCount -= 17;
}
}
assertEquals("maxDoc=" + searcher.getIndexReader().maxDoc() + " numDocs=" + searcher.getIndexReader().numDocs(), expectedCount, hits.length);
searcher.close();
reader.close();
if (i == N) {
fail("should have failed on commits before last 5");
}
} catch (IOException e) {
if (i != N) {
throw e;
}
}
if (i < N) {
dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
}
gen--;
}
dir.close();
}
}
/*
* Test a deletion policy that keeps last N commits
* around, through creates.
*/
/* nocommit: fix this test, I don't understand it!
public void testKeepLastNDeletionPolicyWithCreates() throws IOException {
final int N = 10;
@@ -849,6 +718,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
dir.close();
}
}
*/
private void addDoc(IndexWriter writer) throws IOException
{

View File: TestDirectoryReader.java

@@ -74,12 +74,7 @@ public class TestDirectoryReader extends LuceneTestCase {
return reader;
}
public void test() throws Exception {
doTestDocument();
doTestUndeleteAll();
}
public void doTestDocument() throws IOException {
public void testDocument() throws IOException {
sis.read(dir);
IndexReader reader = openReader();
assertTrue(reader != null);
@@ -94,43 +89,6 @@ public class TestDirectoryReader extends LuceneTestCase {
TestSegmentReader.checkNorms(reader);
reader.close();
}
public void doTestUndeleteAll() throws IOException {
sis.read(dir);
IndexReader reader = openReader();
assertTrue(reader != null);
assertEquals( 2, reader.numDocs() );
reader.deleteDocument(0);
assertEquals( 1, reader.numDocs() );
reader.undeleteAll();
assertEquals( 2, reader.numDocs() );
// Ensure undeleteAll survives commit/close/reopen:
reader.commit();
reader.close();
if (reader instanceof MultiReader)
// MultiReader does not "own" the directory so it does
// not write the changes to sis on commit:
sis.commit(dir, sis.codecFormat());
sis.read(dir);
reader = openReader();
assertEquals( 2, reader.numDocs() );
reader.deleteDocument(0);
assertEquals( 1, reader.numDocs() );
reader.commit();
reader.close();
if (reader instanceof MultiReader)
// MultiReader does not "own" the directory so it does
// not write the changes to sis on commit:
sis.commit(dir, sis.codecFormat());
sis.read(dir);
reader = openReader();
assertEquals( 1, reader.numDocs() );
reader.close();
}
public void testIsCurrent() throws IOException {
Directory ramDir1=newDirectory();

View File: TestIndexFileDeleter.java

@@ -68,11 +68,14 @@ public class TestIndexFileDeleter extends LuceneTestCase {
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir, false);
writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
reader.close();
writer.deleteDocuments(searchTerm);
writer.close();
// Now, artificially create an extra .del file & extra
// .s0 file:

View File: TestIndexReader.java

@@ -52,64 +52,8 @@ import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;
public class TestIndexReader extends LuceneTestCase
{
public void testCommitUserData() throws Exception {
Directory d = newDirectory();
Map<String,String> commitUserData = new HashMap<String,String>();
commitUserData.put("foo", "fighters");
// set up writer
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(2));
for(int i=0;i<27;i++)
addDocumentWithFields(writer);
writer.close();
IndexReader r = IndexReader.open(d, false);
r.deleteDocument(5);
r.flush(commitUserData);
IndexCommit c = r.getIndexCommit();
r.close();
SegmentInfos sis = new SegmentInfos();
sis.read(d);
IndexReader r2 = IndexReader.open(d, false);
assertEquals(c.getUserData(), commitUserData);
assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
// Change the index
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setMaxBufferedDocs(2));
for(int i=0;i<7;i++)
addDocumentWithFields(writer);
writer.close();
IndexReader r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3);
assertFalse(c.equals(r3.getIndexCommit()));
assertFalse(r2.getIndexCommit().getSegmentCount() == 1 && !r2.hasDeletions());
r3.close();
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
writer.forceMerge(1);
writer.close();
r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3);
assertEquals(1, r3.getIndexCommit().getSegmentCount());
r2.close();
r3.close();
d.close();
}
public class TestIndexReader extends LuceneTestCase {
public void testIsCurrent() throws Exception {
Directory d = newDirectory();
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
@@ -388,79 +332,6 @@ public class TestIndexReader extends LuceneTestCase
dir.close();
}
// Make sure attempts to make changes after reader is
// closed throws IOException:
public void testChangesAfterClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
Term searchTerm = new Term("content", "aaa");
// add 11 documents with term : aaa
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < 11; i++) {
addDoc(writer, searchTerm.text());
}
writer.close();
reader = IndexReader.open(dir, false);
// Close reader:
reader.close();
// Then, try to make changes:
try {
reader.deleteDocument(4);
fail("deleteDocument after close failed to throw IOException");
} catch (AlreadyClosedException e) {
// expected
}
try {
reader.undeleteAll();
fail("undeleteAll after close failed to throw IOException");
} catch (AlreadyClosedException e) {
// expected
}
dir.close();
}
// Make sure we get lock obtain failed exception with 2 writers:
public void testLockObtainFailed() throws IOException {
Directory dir = newDirectory();
Term searchTerm = new Term("content", "aaa");
// add 11 documents with term : aaa
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.commit();
for (int i = 0; i < 11; i++) {
addDoc(writer, searchTerm.text());
}
// Create reader:
IndexReader reader = IndexReader.open(dir, false);
// Try to make changes
try {
reader.deleteDocument(4);
fail("deleteDocument should have hit LockObtainFailedException");
} catch (LockObtainFailedException e) {
// expected
}
try {
reader.undeleteAll();
fail("undeleteAll should have hit LockObtainFailedException");
} catch (LockObtainFailedException e) {
// expected
}
writer.close();
reader.close();
dir.close();
}
/* ??? public void testOpenEmptyDirectory() throws IOException{
String dirName = "test.empty";
File fileDirName = new File(dirName);
@@ -560,69 +431,6 @@ public class TestIndexReader extends LuceneTestCase
dir.close();
}
public void testLock() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocumentWithFields(writer);
writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
IndexReader reader = IndexReader.open(dir, false);
try {
reader.deleteDocument(0);
fail("expected lock");
} catch(IOException e) {
// expected exception
}
try {
IndexWriter.unlock(dir); // this should not be done in the real world!
} catch (LockReleaseFailedException lrfe) {
writer.close();
}
reader.deleteDocument(0);
reader.close();
writer.close();
dir.close();
}
public void testDocsOutOfOrderJIRA140() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for(int i=0;i<11;i++) {
addDoc(writer, "aaa");
}
writer.close();
IndexReader reader = IndexReader.open(dir, false);
// Try to delete an invalid docId, yet, within range
// of the final bits of the BitVector:
boolean gotException = false;
try {
reader.deleteDocument(11);
} catch (ArrayIndexOutOfBoundsException e) {
gotException = true;
}
reader.close();
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
// We must add more docs to get a new segment written
for(int i=0;i<11;i++) {
addDoc(writer, "aaa");
}
// Without the fix for LUCENE-140 this call will
// [incorrectly] hit a "docs out of order"
// IllegalStateException because above out-of-bounds
// deleteDocument corrupted the index:
writer.forceMerge(1);
writer.close();
if (!gotException) {
fail("delete of out-of-bounds doc number failed to hit exception");
}
dir.close();
}
public void testOpenReaderAfterDelete() throws IOException {
File dirFile = _TestUtil.getTempDir("deletetest");
Directory dir = newFSDirectory(dirFile);
@@ -860,96 +668,6 @@ public class TestIndexReader extends LuceneTestCase
d.close();
}
public void testReadOnly() throws Throwable {
Directory d = newDirectory();
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocumentWithFields(writer);
writer.commit();
addDocumentWithFields(writer);
writer.close();
IndexReader r = IndexReader.open(d, true);
try {
r.deleteDocument(0);
fail();
} catch (UnsupportedOperationException uoe) {
// expected
}
writer = new IndexWriter(
d,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMergePolicy(newLogMergePolicy(10))
);
addDocumentWithFields(writer);
writer.close();
// Make sure reopen is still readonly:
IndexReader r2 = IndexReader.openIfChanged(r);
assertNotNull(r2);
r.close();
assertFalse(r == r2);
try {
r2.deleteDocument(0);
fail();
} catch (UnsupportedOperationException uoe) {
// expected
}
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
writer.forceMerge(1);
writer.close();
// Make sure reopen to a single segment is still readonly:
IndexReader r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3);
assertFalse(r3 == r2);
r2.close();
assertFalse(r == r2);
try {
r3.deleteDocument(0);
fail();
} catch (UnsupportedOperationException uoe) {
// expected
}
// Make sure write lock isn't held
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
writer.close();
r3.close();
d.close();
}
// LUCENE-1474
public void testIndexReader() throws Exception {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.addDocument(createDocument("a"));
writer.addDocument(createDocument("b"));
writer.addDocument(createDocument("c"));
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("id", "a"));
reader.flush();
reader.deleteDocuments(new Term("id", "b"));
reader.close();
IndexReader.open(dir,true).close();
dir.close();
}
static Document createDocument(String id) {
Document doc = new Document();
FieldType customType = new FieldType(TextField.TYPE_STORED);
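
Most of the tests deleted above guarded reader-side locking (write.lock contention, StaleReaderException, read-only clones). With mutation gone from IndexReader, those states no longer exist; the only lock contention left is between writers. A minimal sketch of the surviving behavior (directory and config setup elided):

IndexWriter w1 = new IndexWriter(dir, conf);
try {
  // A second writer on the same directory still cannot acquire write.lock.
  new IndexWriter(dir, conf2);
  fail("expected LockObtainFailedException");
} catch (LockObtainFailedException e) {
  // expected: w1 holds the lock until close()
}
w1.close();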

View File: TestIndexReaderClone.java

@@ -33,143 +33,6 @@ import org.apache.lucene.util.Bits;
*/
public class TestIndexReaderClone extends LuceneTestCase {
public void testCloneReadOnlySegmentReader() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
IndexReader reader = IndexReader.open(dir1, false);
IndexReader readOnlyReader = reader.clone(true);
if (!isReadOnly(readOnlyReader)) {
fail("reader isn't read only");
}
if (deleteWorked(1, readOnlyReader)) {
fail("deleting from the original should not have worked");
}
reader.close();
readOnlyReader.close();
dir1.close();
}
// open non-readOnly reader1, clone to non-readOnly
// reader2, make sure we can change reader2
public void testCloneNoChangesStillReadOnly() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader r1 = IndexReader.open(dir1, false);
IndexReader r2 = r1.clone(false);
if (!deleteWorked(1, r2)) {
fail("deleting from the cloned should have worked");
}
r1.close();
r2.close();
dir1.close();
}
// open non-readOnly reader1, clone to non-readOnly
// reader2, make sure we can change reader1
public void testCloneWriteToOrig() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader r1 = IndexReader.open(dir1, false);
IndexReader r2 = r1.clone(false);
if (!deleteWorked(1, r1)) {
fail("deleting from the original should have worked");
}
r1.close();
r2.close();
dir1.close();
}
// open non-readOnly reader1, clone to non-readOnly
// reader2, make sure we can change reader2
public void testCloneWriteToClone() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader r1 = IndexReader.open(dir1, false);
IndexReader r2 = r1.clone(false);
if (!deleteWorked(1, r2)) {
fail("deleting from the original should have worked");
}
// should fail because reader1 holds the write lock
assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
r2.close();
// should fail because we are now stale (reader1
// committed changes)
assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
r1.close();
dir1.close();
}
// open non-readOnly reader1, clone to readOnly reader2
public void testCloneWriteableToReadOnly() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader = IndexReader.open(dir1, false);
IndexReader readOnlyReader = reader.clone(true);
if (!isReadOnly(readOnlyReader)) {
fail("reader isn't read only");
}
if (deleteWorked(1, readOnlyReader)) {
fail("deleting from the original should not have worked");
}
// this readonly reader shouldn't have a write lock
if (readOnlyReader.hasChanges) {
fail("readOnlyReader has a write lock");
}
reader.close();
readOnlyReader.close();
dir1.close();
}
// open non-readOnly reader1, reopen to readOnly reader2
public void testReopenWriteableToReadOnly() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader = IndexReader.open(dir1, false);
final int docCount = reader.numDocs();
assertTrue(deleteWorked(1, reader));
assertEquals(docCount-1, reader.numDocs());
IndexReader readOnlyReader = IndexReader.openIfChanged(reader, true);
assertNotNull(readOnlyReader);
if (!isReadOnly(readOnlyReader)) {
fail("reader isn't read only");
}
assertFalse(deleteWorked(1, readOnlyReader));
assertEquals(docCount-1, readOnlyReader.numDocs());
reader.close();
readOnlyReader.close();
dir1.close();
}
// open readOnly reader1, clone to non-readOnly reader2
public void testCloneReadOnlyToWriteable() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader1 = IndexReader.open(dir1, true);
IndexReader reader2 = reader1.clone(false);
if (isReadOnly(reader2)) {
fail("reader should not be read only");
}
assertFalse("deleting from the original reader should not have worked", deleteWorked(1, reader1));
// this readonly reader shouldn't yet have a write lock
if (reader2.hasChanges) {
fail("cloned reader should not have write lock");
}
assertTrue("deleting from the cloned reader should have worked", deleteWorked(1, reader2));
reader1.close();
reader2.close();
dir1.close();
}
// open non-readOnly reader1 on multi-segment index, then
// fully merge the index, then clone to readOnly reader2
public void testReadOnlyCloneAfterFullMerge() throws Exception {
@@ -188,17 +51,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
dir1.close();
}
private static boolean deleteWorked(int doc, IndexReader r) {
boolean exception = false;
try {
// trying to delete from the original reader should throw an exception
r.deleteDocument(doc);
} catch (Exception ex) {
exception = true;
}
return !exception;
}
public void testCloneReadOnlyDirectoryReader() throws Exception {
final Directory dir1 = newDirectory();
@@ -223,19 +75,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
}
}
public void testSegmentReaderUndeleteall() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
origSegmentReader.deleteDocument(10);
assertDelDocsRefCountEquals(1, origSegmentReader);
origSegmentReader.undeleteAll();
assertNull(origSegmentReader.liveDocsRef);
origSegmentReader.close();
// need to test norms?
dir1.close();
}
public void testSegmentReaderCloseReferencing() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
@@ -250,135 +89,10 @@ public class TestIndexReaderClone extends LuceneTestCase {
clonedSegmentReader.close();
dir1.close();
}
public void testSegmentReaderDelDocsReferenceCounting() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
IndexReader origReader = IndexReader.open(dir1, false);
SegmentReader origSegmentReader = getOnlySegmentReader(origReader);
// liveDocsRef should be null because nothing has updated yet
assertNull(origSegmentReader.liveDocsRef);
// we deleted a document, so there is now a liveDocs bitvector and a
// reference to it
origReader.deleteDocument(1);
assertDelDocsRefCountEquals(1, origSegmentReader);
// the cloned segmentreader should have 2 references, 1 to itself, and 1 to
// the original segmentreader
IndexReader clonedReader = (IndexReader) origReader.clone();
SegmentReader clonedSegmentReader = getOnlySegmentReader(clonedReader);
assertDelDocsRefCountEquals(2, origSegmentReader);
// deleting a document creates a new liveDocs bitvector, the refs goes to
// 1
clonedReader.deleteDocument(2);
assertDelDocsRefCountEquals(1, origSegmentReader);
assertDelDocsRefCountEquals(1, clonedSegmentReader);
// make sure the deletedocs objects are different (copy
// on write)
assertTrue(origSegmentReader.liveDocs != clonedSegmentReader.liveDocs);
assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
final Bits liveDocs = origSegmentReader.getLiveDocs();
assertTrue(liveDocs == null || liveDocs.get(2)); // doc 2 should not be deleted
// in original segmentreader
assertFalse(clonedSegmentReader.getLiveDocs().get(2)); // doc 2 should be deleted in
// cloned segmentreader
// deleting a doc from the original segmentreader should throw an exception
try {
origReader.deleteDocument(4);
fail("expected exception");
} catch (LockObtainFailedException lbfe) {
// expected
}
origReader.close();
// try closing the original segment reader to see if it affects the
// clonedSegmentReader
clonedReader.deleteDocument(3);
clonedReader.flush();
assertDelDocsRefCountEquals(1, clonedSegmentReader);
// test a reopened reader
IndexReader reopenedReader = IndexReader.openIfChanged(clonedReader);
if (reopenedReader == null) {
reopenedReader = clonedReader;
}
IndexReader cloneReader2 = (IndexReader) reopenedReader.clone();
SegmentReader cloneSegmentReader2 = getOnlySegmentReader(cloneReader2);
assertDelDocsRefCountEquals(2, cloneSegmentReader2);
clonedReader.close();
reopenedReader.close();
cloneReader2.close();
dir1.close();
}
// LUCENE-1648
public void testCloneWithDeletes() throws Throwable {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
IndexReader origReader = IndexReader.open(dir1, false);
origReader.deleteDocument(1);
IndexReader clonedReader = (IndexReader) origReader.clone();
origReader.close();
clonedReader.close();
IndexReader r = IndexReader.open(dir1, false);
assertFalse(MultiFields.getLiveDocs(r).get(1));
r.close();
dir1.close();
}
private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
int doc) {
assertEquals(reader.getLiveDocs().get(doc), reader2.getLiveDocs().get(doc));
}
private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
assertEquals(refCount, reader.liveDocsRef.get());
}
public void testCloneSubreaders() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader = IndexReader.open(dir1, false);
reader.deleteDocument(1); // acquire write lock
IndexReader[] subs = reader.getSequentialSubReaders();
assert subs.length > 1;
IndexReader[] clones = new IndexReader[subs.length];
for (int x=0; x < subs.length; x++) {
clones[x] = (IndexReader) subs[x].clone();
}
reader.close();
for (int x=0; x < subs.length; x++) {
clones[x].close();
}
dir1.close();
}
public void testLucene1516Bug() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
IndexReader r1 = IndexReader.open(dir1, false);
r1.incRef();
IndexReader r2 = r1.clone(false);
r1.deleteDocument(5);
r1.decRef();
r1.incRef();
r2.close();
r1.decRef();
r1.close();
dir1.close();
}
public void testCloseStoredFields() throws Exception {
final Directory dir = newDirectory();

View File: TestIndexReaderDelete.java (deleted in this commit)

@@ -1,375 +0,0 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.LuceneTestCase;
import static org.apache.lucene.index.TestIndexReader.addDoc;
import static org.apache.lucene.index.TestIndexReader.addDocumentWithFields;
import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
import static org.apache.lucene.index.TestIndexReader.createDocument;
public class TestIndexReaderDelete extends LuceneTestCase {
private void deleteReaderReaderConflict(boolean doFullMerge) throws IOException {
Directory dir = newDirectory();
Term searchTerm1 = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
Term searchTerm3 = new Term("content", "ccc");
// add 100 documents with term : aaa
// add 100 documents with term : bbb
// add 100 documents with term : ccc
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
for (int i = 0; i < 100; i++) {
addDoc(writer, searchTerm1.text());
addDoc(writer, searchTerm2.text());
addDoc(writer, searchTerm3.text());
}
if (doFullMerge) {
writer.forceMerge(1);
}
writer.close();
// OPEN TWO READERS
// Both readers get segment info as exists at this time
IndexReader reader1 = IndexReader.open(dir, false);
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("first opened", reader1, searchTerm1, 100);
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
IndexReader reader2 = IndexReader.open(dir, false);
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
assertTermDocsCount("first opened", reader2, searchTerm1, 100);
assertTermDocsCount("first opened", reader2, searchTerm2, 100);
assertTermDocsCount("first opened", reader2, searchTerm3, 100);
// DELETE DOCS FROM READER 2 and CLOSE IT
// delete documents containing term: aaa
// when the reader is closed, the segment info is updated and
// the first reader is now stale
reader2.deleteDocuments(searchTerm1);
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
reader2.close();
// Make sure reader 1 is unchanged since it was open earlier
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
// ATTEMPT TO DELETE FROM STALE READER
// delete documents containing term: bbb
try {
reader1.deleteDocuments(searchTerm2);
fail("Delete allowed from a stale index reader");
} catch (IOException e) {
/* success */
}
// RECREATE READER AND TRY AGAIN
reader1.close();
reader1 = IndexReader.open(dir, false);
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("reopened", reader1, searchTerm1, 0);
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
reader1.deleteDocuments(searchTerm2);
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
reader1.close();
// Open another reader to confirm that everything is deleted
reader2 = IndexReader.open(dir, false);
assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
reader2.close();
dir.close();
}
private void deleteReaderWriterConflict(boolean doFullMerge) throws IOException {
//Directory dir = new RAMDirectory();
Directory dir = newDirectory();
Term searchTerm = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
// add 100 documents with term : aaa
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
for (int i = 0; i < 100; i++) {
addDoc(writer, searchTerm.text());
}
writer.close();
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
IndexReader reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 0);
// add 100 documents with term : bbb
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
for (int i = 0; i < 100; i++) {
addDoc(writer, searchTerm2.text());
}
// REQUEST full merge
// This causes a new segment to become current for all subsequent
// searchers. Because of this, deletions made via a previously open
// reader, which would be applied to that reader's segment, are lost
// for subsequent searchers/readers
if (doFullMerge) {
writer.forceMerge(1);
}
writer.close();
// The reader should not see the new data
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 0);
// DELETE DOCUMENTS CONTAINING TERM: aaa
// NOTE: the reader was created when only "aaa" documents were in
int deleted = 0;
try {
deleted = reader.deleteDocuments(searchTerm);
fail("Delete allowed on an index reader with stale segment information");
} catch (StaleReaderException e) {
/* success */
}
// Re-open index reader and try again. This time it should see
// the new data.
reader.close();
reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 100);
deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir, false);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
reader.close();
dir.close();
}
public void testBasicDelete() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = null;
IndexReader reader = null;
Term searchTerm = new Term("content", "aaa");
// add 100 documents with term : aaa
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < 100; i++) {
addDoc(writer, searchTerm.text());
}
writer.close();
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("first reader", reader, searchTerm, 100);
reader.close();
// DELETE DOCUMENTS CONTAINING TERM: aaa
int deleted = 0;
reader = IndexReader.open(dir, false);
deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
// open a 2nd reader to make sure first reader can
// commit its changes (.del) while second reader
// is open:
IndexReader reader2 = IndexReader.open(dir, false);
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir, false);
assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
reader.close();
reader2.close();
dir.close();
}
public void testDeleteReaderReaderConflictNoFullMerge() throws IOException {
deleteReaderReaderConflict(false);
}
public void testDeleteReaderReaderConflictFullMerge() throws IOException {
deleteReaderReaderConflict(true);
}
public void testDeleteReaderWriterConflictNoFullMerge() throws IOException {
deleteReaderWriterConflict(false);
}
public void testDeleteReaderWriterConflictFullMerge() throws IOException {
deleteReaderWriterConflict(true);
}
public void testMultiReaderDeletes() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
doc.add(newField("f", "doctor", StringField.TYPE_UNSTORED));
w.addDocument(doc);
doc = new Document();
w.commit();
doc.add(newField("f", "who", StringField.TYPE_UNSTORED));
w.addDocument(doc);
IndexReader r = new SlowMultiReaderWrapper(w.getReader());
w.close();
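// getLiveDocs() returns null while a reader has no deletions; the first
// delete below materializes a bitset with the deleted doc's bit cleared.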
assertNull(r.getLiveDocs());
r.close();
r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
assertNull(r.getLiveDocs());
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
assertNotNull(r.getLiveDocs());
assertFalse(r.getLiveDocs().get(0));
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
assertFalse(r.getLiveDocs().get(1));
r.close();
dir.close();
}
public void testUndeleteAll() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.deleteDocument(1);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir, false);
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
reader.close();
dir.close();
}
public void testUndeleteAllAfterClose() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.close();
reader = IndexReader.open(dir, false);
reader.undeleteAll();
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
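// Note that undeleteAll() here reverses a deletion that was already
// committed when the previous reader closed: deletions live in side
// .del files, not in the segment's postings, so they remain reversible.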
reader.close();
dir.close();
}
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.close();
reader = IndexReader.open(dir, false);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir, false);
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
reader.close();
dir.close();
}
// LUCENE-1647
public void testIndexReaderUnDeleteAll() throws Exception {
MockDirectoryWrapper dir = newDirectory();
dir.setPreventDoubleWrite(false);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.addDocument(createDocument("a"));
writer.addDocument(createDocument("b"));
writer.addDocument(createDocument("c"));
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("id", "a"));
reader.flush();
reader.deleteDocuments(new Term("id", "b"));
reader.undeleteAll();
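// LUCENE-1647: undeleteAll() after the flush above must also discard the
// deletions already written to disk; the re-delete that follows then
// rewrites a .del file, which is presumably why preventDoubleWrite is
// disabled above.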
reader.deleteDocuments(new Term("id", "b"));
reader.close();
IndexReader.open(dir, true).close();
dir.close();
}
}

View File

@ -852,14 +852,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
assertNotNull(r2);
assertTrue(r2 != r);
// Reader should be readOnly
try {
r2.deleteDocument(0);
fail("no exception hit");
} catch (UnsupportedOperationException uoe) {
// expected
}
final Map<String,String> s = commit.getUserData();
final int v;
if (s.size() == 0) {

View File

@ -90,19 +90,19 @@ public class TestIndexWriter extends LuceneTestCase {
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
addDocWithIndex(writer,i);
}
assertEquals(100, writer.maxDoc());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir, false);
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
writer.deleteDocuments(new Term("id", ""+i));
}
reader.close();
writer.close();
reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir);
assertEquals(60, reader.numDocs());
reader.close();
@ -115,7 +115,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
@ -696,7 +696,6 @@ public class TestIndexWriter extends LuceneTestCase {
public void testVariableSchema() throws Exception {
Directory dir = newDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + i);
@ -728,9 +727,6 @@ public class TestIndexWriter extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
if (0 == i % 4) {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));

View File

@ -473,29 +473,6 @@ public class TestIndexWriterCommit extends LuceneTestCase {
assertNotNull(commit);
IndexReader r = IndexReader.open(commit, true);
assertEquals(2, r.numDocs());
r.close();
// open "second", w/ writeable IndexReader & commit
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
assertEquals(2, r.numDocs());
r.deleteDocument(0);
r.deleteDocument(1);
commitData.put("tag", "fourth");
r.commit(commitData);
r.close();
// make sure "third" commit is still there
commit = null;
for(IndexCommit c : IndexReader.listCommits(dir)) {
if (c.getUserData().get("tag").equals("third")) {
commit = c;
break;
}
}
assertNotNull(commit);
dir.close();
}

View File

@ -192,9 +192,14 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("content", "aaa"));
reader.close();
// delete some docs without merging
writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
);
writer.deleteDocuments(new Term("content", "aaa"));
writer.close();
ldmp = new LogDocMergePolicy();
ldmp.setMergeFactor(5);

View File

@ -116,6 +116,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
writer.close();
}
/* nocommit: Fix tests to use an id and delete by term
// LUCENE-325: test forceMergeDeletes, when 2 singular merges
// are required
public void testForceMergeDeletes() throws IOException {
@ -126,8 +127,6 @@ public class TestIndexWriterMerging extends LuceneTestCase
IndexWriterConfig.DISABLE_AUTO_FLUSH));
Document document = new Document();
document = new Document();
FieldType customType = new FieldType();
customType.setStored(true);
@ -270,6 +269,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
ir.close();
dir.close();
}
*/
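// The nocommit above asks for these disabled tests to be rewritten around
// an id field deleted through the writer. A minimal sketch of that pattern
// (the "id" field name is illustrative):
//
//   Document doc = new Document();
//   doc.add(newField("id", Integer.toString(i), StringField.TYPE_UNSTORED));
//   writer.addDocument(doc);
//   ...
//   writer.deleteDocuments(new Term("id", Integer.toString(i)));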
// Just intercepts all merges & verifies that we are never
// merging a segment with >= 20 (maxMergeDocs) docs

View File

@ -70,6 +70,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
* to have TermVectors. Adding this index to another index should not throw
* any exception.
*/
/* nocommit: Fix tests to use an id and delete by term
public void testEmptyIndexWithVectors() throws IOException {
Directory rd1 = newDirectory();
{
@ -121,4 +122,5 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
rdOut.close();
}
*/
}

View File

@ -276,6 +276,7 @@ public class TestSizeBoundedForceMerge extends LuceneTestCase {
assertEquals(4, sis.size());
}
/* nocommit: Fix tests to use an id and delete by term
public void testSingleMergeableSegment() throws Exception {
Directory dir = new RAMDirectory();
@ -308,6 +309,7 @@ public class TestSizeBoundedForceMerge extends LuceneTestCase {
assertEquals(3, sis.size());
assertFalse(sis.info(2).hasDeletions());
}
*/
public void testSingleNonMergeableSegment() throws Exception {
Directory dir = new RAMDirectory();
@ -334,6 +336,7 @@ public class TestSizeBoundedForceMerge extends LuceneTestCase {
assertEquals(1, sis.size());
}
/* nocommit: Fix tests to use an id and delete by term
public void testSingleMergeableTooLargeSegment() throws Exception {
Directory dir = new RAMDirectory();
@ -364,5 +367,6 @@ public class TestSizeBoundedForceMerge extends LuceneTestCase {
assertEquals(1, sis.size());
assertTrue(sis.info(0).hasDeletions());
}
*/
}

View File

@ -58,6 +58,7 @@ public class TestSumDocFreq extends LuceneTestCase {
assertSumDocFreq(ir);
ir.close();
/* nocommit: fix this to use IW to delete documents
ir = IndexReader.open(dir, false);
int numDeletions = atLeast(20);
for (int i = 0; i < numDeletions; i++) {
@ -72,6 +73,7 @@ public class TestSumDocFreq extends LuceneTestCase {
ir = IndexReader.open(dir, true);
assertSumDocFreq(ir);
ir.close();
*/
dir.close();
}

View File

@ -70,10 +70,12 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
hits = is.search(bq, null, 1000).scoreDocs;
assertEquals(1, hits.length);
/* nocommit: fix this test to delete a document with IW
// delete a document:
is.getIndexReader().deleteDocument(0);
hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
assertEquals(2, hits.length);
*/
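// A sketch of the intended writer-based rewrite (the Term below is a
// stand-in for whichever field/value identifies the deleted document, and
// "analyzer" is a placeholder; the searcher must be reopened because its
// reader is a point-in-time view):
//
//   IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
//       TEST_VERSION_CURRENT, analyzer));
//   iw.deleteDocuments(new Term("key", "one")); // hypothetical field/value
//   iw.close();
//   ir = IndexReader.open(dir);
//   is = new IndexSearcher(ir);
//   hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
//   assertEquals(2, hits.length);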
is.close();
ir.close();

View File

@ -240,6 +240,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
}
}
/* nocommit: fix deletions to use IW
public void testSetBufferSize() throws IOException {
File indexDir = _TestUtil.getTempDir("testSetBufferSize");
MockFSDirectory dir = new MockFSDirectory(indexDir, random);
@ -289,6 +290,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
_TestUtil.rmDir(indexDir);
}
}
*/
private static class MockFSDirectory extends Directory {