mirror of https://github.com/apache/lucene.git
merge trunk (1211241:1212396)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3622@1212405 13f79535-47bb-0310-9956-ffa450edef68
commit a9011dbd91
@@ -207,6 +207,18 @@ Changes in backwards compatibility policy
* LUCENE-3533: Removed SpanFilters, they created large lists of objects and
  did not scale. (Robert Muir)

* LUCENE-3606: IndexReader was made read-only. It is no longer possible to
  delete or undelete documents using IndexReader; you have to use IndexWriter
  now. As deleting by internal Lucene docID is no longer possible, this
  requires adding a unique identifier field to your index. Deleting/relying
  upon Lucene docIDs is not recommended anyway, because they can change.
  Consequently commit() was removed and IndexReader.open(), openIfChanged(),
  and clone() no longer take readOnly booleans or IndexDeletionPolicy
  instances. Furthermore, IndexReader.setNorm() was removed. If you need
  customized norm values, the recommended way to do this is by modifying
  SimilarityProvider to use an external byte[] or one of the new DocValues
  fields (LUCENE-3108). (Uwe Schindler, Robert Muir)
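
  A minimal sketch of the new deletion pattern, assuming documents carry a
  unique "id" field (the field name, analyzer and matchVersion below are
  placeholders):

    IndexWriter writer = new IndexWriter(dir,
        new IndexWriterConfig(matchVersion, analyzer));
    writer.deleteDocuments(new Term("id", "42")); // delete by application key, not by docID
    writer.commit();
    writer.close();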

Changes in Runtime Behavior

* LUCENE-2846: omitNorms now behaves like omitTermFrequencyAndPositions, if you

@@ -649,6 +661,13 @@ Changes in backwards compatibility policy
  FieldCacheTermsFilter.FieldCacheTermsFilterDocIdSet was removed and
  replaced by another internal implementation. (Uwe Schindler)

* LUCENE-3620: FilterIndexReader now overrides all methods of IndexReader that
  it should (note that some are still not overridden, as they should be
  overridden by sub-classes only). In the process, some methods of IndexReader
  were made final. This is not expected to affect many apps, since these methods
  already delegate to abstract methods, which you had to already override
  anyway. (Shai Erera)

Security fixes

* LUCENE-3588: Try harder to prevent SIGSEGV on cloned MMapIndexInputs:

@@ -669,6 +688,13 @@ Bug fixes
  to correctly respect deletions on reopened SegmentReaders. Factored out
  FieldCacheDocIdSet to be a top-level class. (Uwe Schindler, Simon Willnauer)

* LUCENE-3627: Don't let an errant 0-byte segments_N file corrupt the index.
  (Ken McCracken via Mike McCandless)

Documentation

* LUCENE-3597: Fixed incorrect grouping documentation. (Martijn van Groningen, Robert Muir)

Build

* LUCENE-3228: Stop downloading external javadoc package-list files:

@@ -58,7 +58,7 @@ public class FieldTermStack {
// writer.addDocument( doc );
// writer.close();

// IndexReader reader = IndexReader.open( dir, true );
// IndexReader reader = IndexReader.open(dir1);
// new FieldTermStack( reader, 0, "f", fieldQuery );
// reader.close();
//}

@@ -68,7 +68,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -116,7 +116,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -191,7 +191,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -237,7 +237,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -281,7 +281,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -1677,7 +1677,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
* writer = new IndexWriter(ramDir,bigramAnalyzer , true); Document d = new
* Document(); Field f = new Field(FIELD_NAME, "java abc def", true, true,
* true); d.add(f); writer.addDocument(d); writer.close(); IndexReader reader =
* IndexReader.open(ramDir, true);
* IndexReader.open(ramDir);
*
* IndexSearcher searcher=new IndexSearcher(reader); query =
* QueryParser.parse("abc", FIELD_NAME, bigramAnalyzer);

@@ -1763,7 +1763,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
writer.addDocument(doc, analyzer);
writer.forceMerge(1);
writer.close();
reader = IndexReader.open(ramDir, true);
reader = IndexReader.open(ramDir);
numHighlights = 0;
}

@@ -113,7 +113,7 @@ public class TokenSourcesTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -162,7 +162,7 @@ public class TokenSourcesTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -210,7 +210,7 @@ public class TokenSourcesTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -259,7 +259,7 @@ public class TokenSourcesTest extends LuceneTestCase {
} finally {
indexWriter.close();
}
final IndexReader indexReader = IndexReader.open(directory, true);
final IndexReader indexReader = IndexReader.open(directory);
try {
assertEquals(1, indexReader.numDocs());
final IndexSearcher indexSearcher = newSearcher(indexReader);

@@ -364,7 +364,7 @@ public abstract class AbstractTestCase extends LuceneTestCase {
writer.addDocument( doc );
writer.close();
if (reader != null) reader.close();
reader = IndexReader.open( dir, true );
reader = IndexReader.open(dir);
}

// make 1 doc with multi valued & not analyzed field

@@ -383,7 +383,7 @@ public abstract class AbstractTestCase extends LuceneTestCase {
writer.addDocument( doc );
writer.close();
if (reader != null) reader.close();
reader = IndexReader.open( dir, true );
reader = IndexReader.open(dir);
}

protected void makeIndexShortMV() throws Exception {

@@ -147,7 +147,7 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase {
writer.addDocument( doc );
writer.close();
if (reader != null) reader.close();
reader = IndexReader.open( dir, true );
reader = IndexReader.open(dir);
}

public void test1StrMV() throws Exception {

@@ -767,11 +767,11 @@ public class MemoryIndex {
}

@Override
public int docFreq(Term term) {
Info info = getInfo(term.field());
public int docFreq(String field, BytesRef term) {
Info info = getInfo(field);
int freq = 0;
if (info != null) freq = info.getPositions(term.bytes()) != null ? 1 : 0;
if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + term + ", freq:" + freq);
if (info != null) freq = info.getPositions(term) != null ? 1 : 0;
if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + field + ":" + term + ", freq:" + freq);
return freq;
}

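A brief usage sketch of the per-field docFreq signature introduced above (the reader, field name and term value are placeholders, not taken from this patch):

int df = reader.docFreq("content", new BytesRef("lucene"));
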
@@ -1112,11 +1112,6 @@ public class MemoryIndex {
}
return norms;
}

@Override
protected void doSetNorm(int doc, String fieldName, byte value) {
throw new UnsupportedOperationException();
}

@Override
public int numDocs() {

@@ -1142,21 +1137,6 @@
return false;
}

@Override
protected void doDelete(int docNum) {
throw new UnsupportedOperationException();
}

@Override
protected void doUndeleteAll() {
throw new UnsupportedOperationException();
}

@Override
protected void doCommit(Map<String,String> commitUserData) {
if (DEBUG) System.err.println("MemoryIndexReader.doCommit");
}

@Override
protected void doClose() {
if (DEBUG) System.err.println("MemoryIndexReader.doClose");

@@ -1,166 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Copyright 2006 The Apache Software Foundation
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.File;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.ReaderUtil;
|
||||
|
||||
/**
|
||||
* Given a directory and a list of fields, updates the fieldNorms in place for every document.
|
||||
*
|
||||
* If Similarity class is specified, uses its computeNorm method to set norms.
|
||||
* If -n command line argument is used, removed field norms, as if
|
||||
* {@link org.apache.lucene.document.FieldType#setOmitNorms(boolean)} was used.
|
||||
*
|
||||
* <p>
|
||||
* NOTE: This will overwrite any length normalization or field/document boosts.
|
||||
* </p>
|
||||
*
|
||||
*/
|
||||
public class FieldNormModifier {
|
||||
|
||||
/**
|
||||
* Command Line Execution method.
|
||||
*
|
||||
* <pre>
|
||||
* Usage: FieldNormModifier /path/index <package.SimilarityClassName | -n> field1 field2 ...
|
||||
* </pre>
|
||||
*/
|
||||
public static void main(String[] args) throws IOException {
|
||||
if (args.length < 3) {
|
||||
System.err.println("Usage: FieldNormModifier <index> <package.SimilarityClassName | -d> <field1> [field2] ...");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
SimilarityProvider s = null;
|
||||
|
||||
if (args[1].equals("-d"))
|
||||
args[1] = DefaultSimilarity.class.getName();
|
||||
|
||||
try {
|
||||
s = Class.forName(args[1]).asSubclass(SimilarityProvider.class).newInstance();
|
||||
} catch (Exception e) {
|
||||
System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
|
||||
e.printStackTrace(System.err);
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
Directory d = FSDirectory.open(new File(args[0]));
|
||||
FieldNormModifier fnm = new FieldNormModifier(d, s);
|
||||
|
||||
for (int i = 2; i < args.length; i++) {
|
||||
System.out.print("Updating field: " + args[i] + " " + (new Date()).toString() + " ... ");
|
||||
fnm.reSetNorms(args[i]);
|
||||
System.out.println(new Date().toString());
|
||||
}
|
||||
|
||||
d.close();
|
||||
}
|
||||
|
||||
|
||||
private Directory dir;
|
||||
private SimilarityProvider sim;
|
||||
|
||||
/**
|
||||
* Constructor for code that wishes to use this class programmatically
|
||||
* If Similarity is null, kill the field norms.
|
||||
*
|
||||
* @param d the Directory to modify
|
||||
* @param s the Similarity to use (can be null)
|
||||
*/
|
||||
public FieldNormModifier(Directory d, SimilarityProvider s) {
|
||||
dir = d;
|
||||
sim = s;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resets the norms for the specified field.
|
||||
*
|
||||
* <p>
|
||||
* Opens a new IndexReader on the Directory given to this instance,
|
||||
* modifies the norms (either using the Similarity given to this instance, or by using fake norms,
|
||||
* and closes the IndexReader.
|
||||
* </p>
|
||||
*
|
||||
* @param field the field whose norms should be reset
|
||||
*/
|
||||
public void reSetNorms(String field) throws IOException {
|
||||
Similarity fieldSim = sim.get(field);
|
||||
IndexReader reader = null;
|
||||
try {
|
||||
reader = IndexReader.open(dir, false);
|
||||
|
||||
final List<IndexReader> subReaders = new ArrayList<IndexReader>();
|
||||
ReaderUtil.gatherSubReaders(subReaders, reader);
|
||||
|
||||
final FieldInvertState invertState = new FieldInvertState();
|
||||
for(IndexReader subReader : subReaders) {
|
||||
final Bits liveDocs = subReader.getLiveDocs();
|
||||
|
||||
int[] termCounts = new int[subReader.maxDoc()];
|
||||
Fields fields = subReader.fields();
|
||||
if (fields != null) {
|
||||
Terms terms = fields.terms(field);
|
||||
if (terms != null) {
|
||||
TermsEnum termsEnum = terms.iterator(null);
|
||||
DocsEnum docs = null;
|
||||
DocsEnum docsAndFreqs = null;
|
||||
while(termsEnum.next() != null) {
|
||||
docsAndFreqs = termsEnum.docs(liveDocs, docsAndFreqs, true);
|
||||
final DocsEnum docs2;
|
||||
if (docsAndFreqs != null) {
|
||||
docs2 = docsAndFreqs;
|
||||
} else {
|
||||
docs2 = docs = termsEnum.docs(liveDocs, docs, false);
|
||||
}
|
||||
while(true) {
|
||||
int docID = docs2.nextDoc();
|
||||
if (docID != docs.NO_MORE_DOCS) {
|
||||
termCounts[docID] += docsAndFreqs == null ? 1 : docsAndFreqs.freq();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
invertState.setBoost(1.0f);
|
||||
for (int d = 0; d < termCounts.length; d++) {
|
||||
if (liveDocs == null || liveDocs.get(d)) {
|
||||
invertState.setLength(termCounts[d]);
|
||||
subReader.setNorm(d, field, fieldSim.computeNorm(invertState));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} finally {
|
||||
if (null != reader) reader.close();
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -49,7 +49,7 @@ public class MultiPassIndexSplitter {

/**
* Split source index into multiple parts.
* @param input source index, can be read-only, can have deletions, can have
* @param in source index, can have deletions, can have
* multiple segments (or multiple readers).
* @param outputs list of directories where the output parts will be stored.
* @param seq if true, then the source index will be split into equal

@@ -57,18 +57,18 @@
* assigned in a deterministic round-robin fashion to one of the output splits.
* @throws IOException
*/
public void split(Version version, IndexReader input, Directory[] outputs, boolean seq) throws IOException {
public void split(Version version, IndexReader in, Directory[] outputs, boolean seq) throws IOException {
if (outputs == null || outputs.length < 2) {
throw new IOException("Invalid number of outputs.");
}
if (input == null || input.numDocs() < 2) {
if (in == null || in.numDocs() < 2) {
throw new IOException("Not enough documents for splitting");
}
int numParts = outputs.length;
// wrap a potentially read-only input
// this way we don't have to preserve original deletions because neither
// deleteDocument(int) or undeleteAll() is applied to the wrapped input index.
input = new FakeDeleteIndexReader(input);
FakeDeleteIndexReader input = new FakeDeleteIndexReader(in);
int maxDoc = input.maxDoc();
int partLen = maxDoc / numParts;
for (int i = 0; i < numParts; i++) {

@@ -143,7 +143,7 @@ public class MultiPassIndexSplitter {
System.err.println("Invalid input index - skipping: " + file);
continue;
}
indexes.add(IndexReader.open(dir, true));
indexes.add(IndexReader.open(dir));
}
}
if (outDir == null) {

@@ -183,7 +183,7 @@ public class MultiPassIndexSplitter {

public FakeDeleteIndexReader(IndexReader in) {
super(new SlowMultiReaderWrapper(in));
doUndeleteAll(); // initialize main bitset
undeleteAll(); // initialize main bitset
}

@Override

@@ -191,12 +191,7 @@ public class MultiPassIndexSplitter {
return liveDocs.cardinality();
}

/**
* Just removes our overlaid deletions - does not undelete the original
* deletions.
*/
@Override
protected void doUndeleteAll() {
void undeleteAll() {
final int maxDoc = in.maxDoc();
liveDocs = new FixedBitSet(in.maxDoc());
if (in.hasDeletions()) {

@@ -212,8 +207,7 @@ public class MultiPassIndexSplitter {
}
}

@Override
protected void doDelete(int n) {
void deleteDocument(int n) {
liveDocs.clear(n);
}

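A rough usage sketch of the split entry point above, mirroring the test code later in this patch (the directories, matchVersion and the no-arg constructor are assumptions):

MultiPassIndexSplitter splitter = new MultiPassIndexSplitter();
IndexReader input = IndexReader.open(sourceDir); // read-only reader; deletions are overlaid, not applied to the source
splitter.split(matchVersion, input, new Directory[] {out0, out1, out2}, false);
input.close();
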
@@ -78,7 +78,7 @@ public class HighFreqTerms {
}
}

reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir);
TermStats[] terms = getHighFreqTerms(reader, numTerms, field);
if (!IncludeTermFreqs) {
//default HighFreqTerms behavior

@@ -1,259 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests changing of field norms with a custom similarity and with fake norms.
|
||||
*/
|
||||
public class TestFieldNormModifier extends LuceneTestCase {
|
||||
public static int NUM_DOCS = 5;
|
||||
|
||||
public Directory store;
|
||||
|
||||
/** inverts the normal notion of lengthNorm */
|
||||
public static SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
store = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
Document d = new Document();
|
||||
|
||||
d.add(newField("field", "word", TextField.TYPE_STORED));
|
||||
|
||||
d.add(newField("nonorm", "word", StringField.TYPE_STORED));
|
||||
d.add(newField("untokfield", "20061212 20071212", TextField.TYPE_STORED));
|
||||
|
||||
for (int j = 1; j <= i; j++) {
|
||||
d.add(newField("field", "crap", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "more words", StringField.TYPE_STORED));
|
||||
}
|
||||
writer.addDocument(d);
|
||||
}
|
||||
writer.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
store.close();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
public void testMissingField() throws Exception {
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nobodyherebutuschickens");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
public void testFieldWithNoNorm() throws Exception {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] norms = MultiNorms.norms(r, "nonorm");
|
||||
|
||||
// sanity check, norms should all be 1
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nonorm");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// nothing should have changed
|
||||
r = IndexReader.open(store, false);
|
||||
|
||||
norms = MultiNorms.norms(r, "nonorm");
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
}
|
||||
|
||||
|
||||
public void testGoodCases() throws Exception {
|
||||
|
||||
IndexReader reader = IndexReader.open(store);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should put docs with shorter length first
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = Float.MAX_VALUE;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
|
||||
assertTrue(msg, scores[i] <= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("field");
|
||||
|
||||
// new norm (with default similarity) should put longer docs first
|
||||
reader = IndexReader.open(store);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = 0.0f;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " >= " + lastScore;
|
||||
assertTrue(msg, scores[i] >= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
}
|
||||
|
||||
public void testNormKiller() throws IOException {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] oldNorms = MultiNorms.norms(r, "untokfield");
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("untokfield");
|
||||
|
||||
r = IndexReader.open(store, false);
|
||||
byte[] newNorms = MultiNorms.norms(r, "untokfield");
|
||||
r.close();
|
||||
assertFalse(Arrays.equals(oldNorms, newNorms));
|
||||
|
||||
|
||||
// verify that we still get documents in the same order as originally
|
||||
IndexReader reader = IndexReader.open(store);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should return the same score for all documents for this query
|
||||
searcher.search(new TermQuery(new Term("untokfield", "20061212")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = scores[0];
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " == " + lastScore;
|
||||
assertTrue(msg, scores[i] == lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -69,7 +69,7 @@ public class TestIndexSplitter extends LuceneTestCase {
String splitSegName = is.infos.info(1).name;
is.split(destDir, new String[] {splitSegName});
Directory fsDirDest = newFSDirectory(destDir);
IndexReader r = IndexReader.open(fsDirDest, true);
IndexReader r = IndexReader.open(fsDirDest);
assertEquals(50, r.maxDoc());
r.close();
fsDirDest.close();

@@ -81,76 +81,17 @@ public class TestIndexSplitter extends LuceneTestCase {
IndexSplitter.main(new String[] {dir.getAbsolutePath(), destDir2.getAbsolutePath(), splitSegName});
assertEquals(4, destDir2.listFiles().length);
Directory fsDirDest2 = newFSDirectory(destDir2);
r = IndexReader.open(fsDirDest2, true);
r = IndexReader.open(fsDirDest2);
assertEquals(50, r.maxDoc());
r.close();
fsDirDest2.close();

// now remove the copied segment from src
IndexSplitter.main(new String[] {dir.getAbsolutePath(), "-d", splitSegName});
r = IndexReader.open(fsDir, true);
r = IndexReader.open(fsDir);
assertEquals(2, r.getSequentialSubReaders().length);
r.close();
fsDir.close();
}

public void testDeleteThenFullMerge() throws Exception {
// Create directories where the indexes will reside
File indexPath = new File(TEMP_DIR, "testfilesplitter");
_TestUtil.rmDir(indexPath);
indexPath.mkdirs();
File indexSplitPath = new File(TEMP_DIR, "testfilesplitterdest");
_TestUtil.rmDir(indexSplitPath);
indexSplitPath.mkdirs();

// Create the original index
LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setNoCFSRatio(1);
IndexWriterConfig iwConfig
= new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE)
.setMergePolicy(mergePolicy);
Directory fsDir = newFSDirectory(indexPath);
IndexWriter indexWriter = new IndexWriter(fsDir, iwConfig);
Document doc = new Document();
doc.add(new Field("content", "doc 1", StringField.TYPE_STORED));
indexWriter.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "doc 2", StringField.TYPE_STORED));
indexWriter.addDocument(doc);
indexWriter.close();
fsDir.close();

// Create the split index
IndexSplitter indexSplitter = new IndexSplitter(indexPath);
String splitSegName = indexSplitter.infos.info(0).name;
indexSplitter.split(indexSplitPath, new String[] {splitSegName});

// Delete the first document in the split index
Directory fsDirDest = newFSDirectory(indexSplitPath);
IndexReader indexReader = IndexReader.open(fsDirDest, false);
indexReader.deleteDocument(0);
assertEquals(1, indexReader.numDocs());
indexReader.close();
fsDirDest.close();

// Fully merge the split index
mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setNoCFSRatio(1);
iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND)
.setMergePolicy(mergePolicy);
fsDirDest = newFSDirectory(indexSplitPath);
indexWriter = new IndexWriter(fsDirDest, iwConfig);
indexWriter.forceMerge(1);
indexWriter.close();
fsDirDest.close();

// Read the number of docs in the index
fsDirDest = newFSDirectory(indexSplitPath);
indexReader = IndexReader.open(fsDirDest);
assertEquals(1, indexReader.numDocs());
indexReader.close();
fsDirDest.close();
}
}

@@ -41,10 +41,10 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
doc.add(newField("f", i + " " + i, TextField.TYPE_STORED));
w.addDocument(doc);
}
w.commit();
w.deleteDocuments(new Term("id", "" + (NUM_DOCS-1)));
w.close();
input = IndexReader.open(dir, false);
// delete the last doc
input.deleteDocument(input.maxDoc() - 1);
input = IndexReader.open(dir);
}

@Override

@@ -66,7 +66,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, false);
IndexReader ir;
ir = IndexReader.open(dirs[0], true);
ir = IndexReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));

@@ -74,7 +74,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term().utf8ToString());
ir.close();
ir = IndexReader.open(dirs[1], true);
ir = IndexReader.open(dirs[1]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("1", doc.get("id"));

@@ -83,7 +83,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {

assertNotSame("0", te.term().utf8ToString());
ir.close();
ir = IndexReader.open(dirs[2], true);
ir = IndexReader.open(dirs[2]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("2", doc.get("id"));

@@ -111,19 +111,19 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, true);
IndexReader ir;
ir = IndexReader.open(dirs[0], true);
ir = IndexReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));
int start = ir.numDocs();
ir.close();
ir = IndexReader.open(dirs[1], true);
ir = IndexReader.open(dirs[1]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));
start += ir.numDocs();
ir.close();
ir = IndexReader.open(dirs[2], true);
ir = IndexReader.open(dirs[2]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));

@@ -43,7 +43,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
.setMaxBufferedDocs(2));
indexDocs(writer);
reader = IndexReader.open(dir, true);
reader = IndexReader.open(dir);
_TestUtil.checkIndex(dir);
}

@@ -1,222 +0,0 @@
|
|||
package org.apache.lucene.misc;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.FieldInvertState;
|
||||
import org.apache.lucene.index.FieldNormModifier;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.MultiNorms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests changing the norms after changing the simularity
|
||||
*/
|
||||
public class TestLengthNormModifier extends LuceneTestCase {
|
||||
public static int NUM_DOCS = 5;
|
||||
|
||||
public Directory store;
|
||||
|
||||
/** inverts the normal notion of lengthNorm */
|
||||
public static SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
store = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("field", "word", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "word", StringField.TYPE_STORED));
|
||||
|
||||
for (int j = 1; j <= i; j++) {
|
||||
d.add(newField("field", "crap", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "more words", StringField.TYPE_STORED));
|
||||
}
|
||||
writer.addDocument(d);
|
||||
}
|
||||
writer.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
store.close();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
public void testMissingField() throws Exception {
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nobodyherebutuschickens");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
public void testFieldWithNoNorm() throws Exception {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] norms = MultiNorms.norms(r, "nonorm");
|
||||
|
||||
// sanity check, norms should all be 1
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nonorm");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// nothing should have changed
|
||||
r = IndexReader.open(store, false);
|
||||
|
||||
norms = MultiNorms.norms(r, "nonorm");
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
}
|
||||
|
||||
|
||||
public void testGoodCases() throws Exception {
|
||||
|
||||
IndexSearcher searcher;
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should put docs with shorter length first
|
||||
IndexReader reader = IndexReader.open(store, false);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = Float.MAX_VALUE;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", "+scores[i]+" <= "+lastScore;
|
||||
assertTrue(msg, scores[i] <= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
// override the norms to be inverted
|
||||
SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("field");
|
||||
|
||||
// new norm (with default similarity) should put longer docs first
|
||||
reader = IndexReader.open(store, false);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = 0.0f;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", "+scores[i]+" >= "+lastScore;
|
||||
assertTrue(msg, scores[i] >= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@@ -524,7 +524,7 @@ public class CheckIndex {
}
if (infoStream != null)
infoStream.print(" test: open reader.........");
reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.DEFAULT);
reader = SegmentReader.get(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.DEFAULT);

segInfoStat.openReaderPassed = true;

@@ -44,18 +44,12 @@ import org.apache.lucene.util.MapBackedSet;
|
|||
*/
|
||||
class DirectoryReader extends IndexReader implements Cloneable {
|
||||
protected Directory directory;
|
||||
protected boolean readOnly;
|
||||
|
||||
IndexWriter writer;
|
||||
|
||||
private IndexDeletionPolicy deletionPolicy;
|
||||
private Lock writeLock;
|
||||
private final SegmentInfos segmentInfos;
|
||||
private boolean stale;
|
||||
private final int termInfosIndexDivisor;
|
||||
|
||||
private boolean rollbackHasChanges;
|
||||
|
||||
private SegmentReader[] subReaders;
|
||||
private ReaderContext topLevelReaderContext;
|
||||
private int[] starts; // 1st docno for each segment
|
||||
|
@@ -63,41 +57,24 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
private int numDocs = -1;
|
||||
private boolean hasDeletions = false;
|
||||
|
||||
// Max version in index as of when we opened; this can be
|
||||
// > our current segmentInfos version in case we were
|
||||
// opened on a past IndexCommit:
|
||||
private long maxIndexVersion;
|
||||
|
||||
private final boolean applyAllDeletes;
|
||||
|
||||
// static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly,
|
||||
// final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
// return open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor, null);
|
||||
// }
|
||||
|
||||
static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly,
|
||||
static IndexReader open(final Directory directory, final IndexCommit commit,
|
||||
final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
|
||||
@Override
|
||||
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
|
||||
SegmentInfos infos = new SegmentInfos();
|
||||
infos.read(directory, segmentFileName);
|
||||
return new DirectoryReader(directory, infos, deletionPolicy, readOnly, termInfosIndexDivisor);
|
||||
return new DirectoryReader(directory, infos, termInfosIndexDivisor);
|
||||
}
|
||||
}.run(commit);
|
||||
}
|
||||
|
||||
/** Construct reading the named set of readers. */
|
||||
// DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws IOException {
|
||||
// this(directory, sis, deletionPolicy, readOnly, termInfosIndexDivisor, null);
|
||||
// }
|
||||
|
||||
/** Construct reading the named set of readers. */
|
||||
DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws IOException {
|
||||
DirectoryReader(Directory directory, SegmentInfos sis, int termInfosIndexDivisor) throws IOException {
|
||||
this.directory = directory;
|
||||
this.readOnly = readOnly;
|
||||
this.segmentInfos = sis;
|
||||
this.deletionPolicy = deletionPolicy;
|
||||
this.termInfosIndexDivisor = termInfosIndexDivisor;
|
||||
readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
|
||||
applyAllDeletes = false;
|
||||
|
@@ -111,7 +88,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
for (int i = sis.size()-1; i >= 0; i--) {
|
||||
boolean success = false;
|
||||
try {
|
||||
readers[i] = SegmentReader.get(readOnly, sis.info(i), termInfosIndexDivisor, IOContext.READ);
|
||||
readers[i] = SegmentReader.get(sis.info(i), termInfosIndexDivisor, IOContext.READ);
|
||||
readers[i].readerFinishedListeners = readerFinishedListeners;
|
||||
success = true;
|
||||
} finally {
|
||||
|
@@ -134,7 +111,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
// Used by near real-time search
|
||||
DirectoryReader(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes) throws IOException {
|
||||
this.directory = writer.getDirectory();
|
||||
this.readOnly = true;
|
||||
this.applyAllDeletes = applyAllDeletes; // saved for reopen
|
||||
|
||||
this.termInfosIndexDivisor = writer.getConfig().getReaderTermsIndexDivisor();
|
||||
|
@@ -186,9 +162,8 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
|
||||
/** This constructor is only used for {@link #doOpenIfChanged()} */
|
||||
DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders,
|
||||
boolean readOnly, boolean doClone, int termInfosIndexDivisor, Collection<ReaderFinishedListener> readerFinishedListeners) throws IOException {
|
||||
boolean doClone, int termInfosIndexDivisor, Collection<ReaderFinishedListener> readerFinishedListeners) throws IOException {
|
||||
this.directory = directory;
|
||||
this.readOnly = readOnly;
|
||||
this.segmentInfos = infos;
|
||||
this.termInfosIndexDivisor = termInfosIndexDivisor;
|
||||
this.readerFinishedListeners = readerFinishedListeners;
|
||||
|
@@ -231,12 +206,12 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
assert !doClone;
|
||||
|
||||
// this is a new reader; in case we hit an exception we can close it safely
|
||||
newReader = SegmentReader.get(readOnly, infos.info(i), termInfosIndexDivisor, IOContext.READ);
|
||||
newReader = SegmentReader.get(infos.info(i), termInfosIndexDivisor, IOContext.READ);
|
||||
newReader.readerFinishedListeners = readerFinishedListeners;
|
||||
readerShared[i] = false;
|
||||
newReaders[i] = newReader;
|
||||
} else {
|
||||
newReader = newReaders[i].reopenSegment(infos.info(i), doClone, readOnly);
|
||||
newReader = newReaders[i].reopenSegment(infos.info(i), doClone);
|
||||
if (newReader == null) {
|
||||
// this reader will be shared between the old and the new one,
|
||||
// so we must incRef it
|
||||
|
@@ -281,9 +256,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder buffer = new StringBuilder();
|
||||
if (hasChanges) {
|
||||
buffer.append("*");
|
||||
}
|
||||
buffer.append(getClass().getSimpleName());
|
||||
buffer.append('(');
|
||||
final String segmentsFile = segmentInfos.getCurrentSegmentFileName();
|
||||
|
@@ -323,10 +295,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
}
|
||||
}
|
||||
starts[subReaders.length] = maxDoc;
|
||||
|
||||
if (!readOnly) {
|
||||
maxIndexVersion = SegmentInfos.readCurrentVersion(directory);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -337,69 +305,46 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
@Override
|
||||
public final synchronized Object clone() {
|
||||
try {
|
||||
return clone(readOnly); // Preserve current readOnly
|
||||
DirectoryReader newReader = doOpenIfChanged((SegmentInfos) segmentInfos.clone(), true);
|
||||
newReader.writer = writer;
|
||||
newReader.hasDeletions = hasDeletions;
|
||||
assert newReader.readerFinishedListeners != null;
|
||||
return newReader;
|
||||
} catch (Exception ex) {
|
||||
throw new RuntimeException(ex);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
// doOpenIfChanged calls ensureOpen
|
||||
DirectoryReader newReader = doOpenIfChanged((SegmentInfos) segmentInfos.clone(), true, openReadOnly);
|
||||
|
||||
if (this != newReader) {
|
||||
newReader.deletionPolicy = deletionPolicy;
|
||||
}
|
||||
newReader.writer = writer;
|
||||
// If we're cloning a non-readOnly reader, move the
|
||||
// writeLock (if there is one) to the new reader:
|
||||
if (!openReadOnly && writeLock != null) {
|
||||
// In near real-time search, reader is always readonly
|
||||
assert writer == null;
|
||||
newReader.writeLock = writeLock;
|
||||
newReader.hasChanges = hasChanges;
|
||||
newReader.hasDeletions = hasDeletions;
|
||||
writeLock = null;
|
||||
hasChanges = false;
|
||||
}
|
||||
assert newReader.readerFinishedListeners != null;
|
||||
|
||||
return newReader;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
|
||||
// Preserve current readOnly
|
||||
return doOpenIfChanged(readOnly, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final IndexReader doOpenIfChanged(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
return doOpenIfChanged(openReadOnly, null);
|
||||
return doOpenIfChanged(null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
return doOpenIfChanged(true, commit);
|
||||
ensureOpen();
|
||||
|
||||
// If we were obtained by writer.getReader(), re-ask the
|
||||
// writer to get a new reader.
|
||||
if (writer != null) {
|
||||
return doOpenFromWriter(commit);
|
||||
} else {
|
||||
return doOpenNoWriter(commit);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
if (writer == this.writer && applyAllDeletes == this.applyAllDeletes) {
|
||||
return doOpenIfChanged();
|
||||
} else {
|
||||
return doOpenFromWriter(null);
|
||||
} else {
|
||||
// fail by calling supers impl throwing UOE
|
||||
return super.doOpenIfChanged(writer, applyAllDeletes);
|
||||
}
|
||||
}
|
||||
|
||||
private final IndexReader doOpenFromWriter(boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
assert readOnly;
|
||||
|
||||
if (!openReadOnly) {
|
||||
throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() can only be reopened with openReadOnly=true (got false)");
|
||||
}
|
||||
|
||||
private final IndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
if (commit != null) {
|
||||
throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
|
||||
}
|
||||
|
@@ -420,56 +365,18 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
return reader;
|
||||
}
|
||||
|
||||
private IndexReader doOpenIfChanged(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
|
||||
assert commit == null || openReadOnly;
|
||||
|
||||
// If we were obtained by writer.getReader(), re-ask the
|
||||
// writer to get a new reader.
|
||||
if (writer != null) {
|
||||
return doOpenFromWriter(openReadOnly, commit);
|
||||
} else {
|
||||
return doOpenNoWriter(openReadOnly, commit);
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized IndexReader doOpenNoWriter(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
private synchronized IndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
|
||||
if (commit == null) {
|
||||
if (hasChanges) {
|
||||
// We have changes, which means we are not readOnly:
|
||||
assert readOnly == false;
|
||||
// and we hold the write lock:
|
||||
assert writeLock != null;
|
||||
// so no other writer holds the write lock, which
|
||||
// means no changes could have been done to the index:
|
||||
assert isCurrent();
|
||||
|
||||
if (openReadOnly) {
|
||||
return clone(openReadOnly);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
} else if (isCurrent()) {
|
||||
if (openReadOnly != readOnly) {
|
||||
// Just fallback to clone
|
||||
return clone(openReadOnly);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
if (isCurrent()) {
|
||||
return null;
|
||||
}
|
||||
} else {
|
||||
if (directory != commit.getDirectory()) {
|
||||
throw new IOException("the specified commit does not match the specified Directory");
|
||||
}
|
||||
if (segmentInfos != null && commit.getSegmentsFileName().equals(segmentInfos.getCurrentSegmentFileName())) {
|
||||
if (readOnly != openReadOnly) {
|
||||
// Just fallback to clone
|
||||
return clone(openReadOnly);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -478,13 +385,13 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
|
||||
final SegmentInfos infos = new SegmentInfos();
|
||||
infos.read(directory, segmentFileName);
|
||||
return doOpenIfChanged(infos, false, openReadOnly);
|
||||
return doOpenIfChanged(infos, false);
|
||||
}
|
||||
}.run(commit);
|
||||
}
|
||||
|
||||
private synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
return new DirectoryReader(directory, infos, subReaders, openReadOnly, doClone, termInfosIndexDivisor, readerFinishedListeners);
|
||||
private synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
|
||||
return new DirectoryReader(directory, infos, subReaders, doClone, termInfosIndexDivisor, readerFinishedListeners);
|
||||
}
|
||||
|
||||
/** Version number when this IndexReader was opened. */
|
||||
|
@@ -535,23 +442,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
return hasDeletions;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doDelete(int n) throws CorruptIndexException, IOException {
|
||||
numDocs = -1; // invalidate cache
|
||||
int i = readerIndex(n); // find segment num
|
||||
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
|
||||
hasDeletions = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUndeleteAll() throws CorruptIndexException, IOException {
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
subReaders[i].undeleteAll();
|
||||
|
||||
hasDeletions = false;
|
||||
numDocs = -1; // invalidate cache
|
||||
}
|
||||
|
||||
private int readerIndex(int n) { // find reader for doc n:
|
||||
return readerIndex(n, this.starts, this.subReaders.length);
|
||||
}
|
||||
|
@@ -592,22 +482,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int n, String field, byte value)
|
||||
throws CorruptIndexException, IOException {
|
||||
int i = readerIndex(n); // find segment num
|
||||
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(Term t) throws IOException {
|
||||
ensureOpen();
|
||||
int total = 0; // sum freqs in segments
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
total += subReaders[i].docFreq(t);
|
||||
return total;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(String field, BytesRef term) throws IOException {
|
||||
ensureOpen();
|
||||
|
@@ -623,150 +497,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
|
|||
throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to acquire the WriteLock on this directory. this method is only valid if this IndexReader is directory
|
||||
* owner.
|
||||
*
|
||||
* @throws StaleReaderException if the index has changed since this reader was opened
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws org.apache.lucene.store.LockObtainFailedException
|
||||
* if another writer has this index open (<code>write.lock</code> could not be
|
||||
* obtained)
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
@Override
|
||||
protected void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
|
||||
|
||||
if (readOnly) {
|
||||
// NOTE: we should not reach this code w/ the core
|
||||
// IndexReader classes; however, an external subclass
|
||||
// of IndexReader could reach this.
|
||||
throw new UnsupportedOperationException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
|
||||
}
|
||||
|
||||
if (segmentInfos != null) {
|
||||
ensureOpen();
|
||||
if (stale)
|
||||
throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
|
||||
|
||||
if (writeLock == null) {
|
||||
Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
|
||||
if (!writeLock.obtain(IndexWriterConfig.WRITE_LOCK_TIMEOUT)) // obtain write lock
|
||||
throw new LockObtainFailedException("Index locked for write: " + writeLock);
|
||||
this.writeLock = writeLock;
|
||||
|
||||
// we have to check whether index has changed since this reader was opened.
|
||||
// if so, this reader is no longer valid for deletion
|
||||
if (SegmentInfos.readCurrentVersion(directory) > maxIndexVersion) {
|
||||
stale = true;
|
||||
this.writeLock.release();
|
||||
this.writeLock = null;
|
||||
throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit changes resulting from delete, undeleteAll, or setNorm operations
|
||||
* <p/>
|
||||
* If an exception is hit, then either no changes or all changes will have been committed to the index (transactional
|
||||
* semantics).
|
||||
*
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
@Override
|
||||
protected void doCommit(Map<String,String> commitUserData) throws IOException {
|
||||
// poll subreaders for changes
|
||||
for (int i = 0; !hasChanges && i < subReaders.length; i++) {
|
||||
hasChanges |= subReaders[i].hasChanges;
|
||||
}
|
||||
|
||||
if (hasChanges) {
|
||||
segmentInfos.setUserData(commitUserData);
|
||||
// Default deleter (for backwards compatibility) is
|
||||
// KeepOnlyLastCommitDeleter:
|
||||
// TODO: Decide what to do with InfoStream here? Use default or keep NO_OUTPUT?
|
||||
IndexFileDeleter deleter = new IndexFileDeleter(directory,
|
||||
deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
|
||||
segmentInfos, InfoStream.NO_OUTPUT, null);
|
||||
segmentInfos.updateGeneration(deleter.getLastSegmentInfos());
|
||||
segmentInfos.changed();
|
||||
|
||||
// Checkpoint the state we are about to change, in
|
||||
// case we have to roll back:
|
||||
startCommit();
|
||||
|
||||
final List<SegmentInfo> rollbackSegments = segmentInfos.createBackupSegmentInfos(false);
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
subReaders[i].commit();
|
||||
|
||||
// Remove segments that contain only 100% deleted
|
||||
// docs:
|
||||
segmentInfos.pruneDeletedSegments();
|
||||
|
||||
// Sync all files we just wrote
|
||||
directory.sync(segmentInfos.files(directory, false));
|
||||
segmentInfos.commit(directory, segmentInfos.codecFormat());
|
||||
success = true;
|
||||
} finally {
|
||||
|
||||
if (!success) {
|
||||
|
||||
// Rollback changes that were made to
|
||||
// SegmentInfos but failed to get [fully]
|
||||
// committed. This way this reader instance
|
||||
// remains consistent (matched to what's
|
||||
// actually in the index):
|
||||
rollbackCommit();
|
||||
|
||||
// Recompute deletable files & remove them (so
|
||||
// partially written .del files, etc, are
|
||||
// removed):
|
||||
deleter.refresh();
|
||||
|
||||
// Restore all SegmentInfos (in case we pruned some)
|
||||
segmentInfos.rollbackSegmentInfos(rollbackSegments);
|
||||
}
|
||||
}
|
||||
|
||||
// Have the deleter remove any now unreferenced
|
||||
// files due to this commit:
|
||||
deleter.checkpoint(segmentInfos, true);
|
||||
deleter.close();
|
||||
|
||||
maxIndexVersion = segmentInfos.getVersion();
|
||||
|
||||
if (writeLock != null) {
|
||||
writeLock.release(); // release write lock
|
||||
writeLock = null;
|
||||
}
|
||||
}
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
void startCommit() {
|
||||
rollbackHasChanges = hasChanges;
|
||||
for (int i = 0; i < subReaders.length; i++) {
|
||||
subReaders[i].startCommit();
|
||||
}
|
||||
}
|
||||
|
||||
void rollbackCommit() {
|
||||
hasChanges = rollbackHasChanges;
|
||||
for (int i = 0; i < subReaders.length; i++) {
|
||||
subReaders[i].rollbackCommit();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUniqueTermCount() throws IOException {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String,String> getCommitUserData() {
|
||||
ensureOpen();
|
||||
|
|
|
@@ -52,7 +52,7 @@ import org.apache.lucene.util.InfoStream;
|
|||
* are processing the document).
|
||||
*
|
||||
* Other consumers, eg {@link FreqProxTermsWriter} and
|
||||
* {@link NormsWriter}, buffer bytes in RAM and flush only
|
||||
* {@link NormsConsumer}, buffer bytes in RAM and flush only
|
||||
* when a new segment is produced.
|
||||
|
||||
* Once we have used our allowed RAM buffer, or the number
|
||||
|
|
|
@@ -79,7 +79,7 @@ public class DocumentsWriterPerThread {
|
|||
|
||||
final InvertedDocConsumer termsHash = new TermsHash(documentsWriterPerThread, freqProxWriter, true,
|
||||
new TermsHash(documentsWriterPerThread, termVectorsWriter, false, null));
|
||||
final NormsWriter normsWriter = new NormsWriter();
|
||||
final NormsConsumer normsWriter = new NormsConsumer(documentsWriterPerThread);
|
||||
final DocInverter docInverter = new DocInverter(documentsWriterPerThread.docState, termsHash, normsWriter);
|
||||
return new DocFieldProcessor(documentsWriterPerThread, docInverter);
|
||||
}
|
||||
|
|
|
@@ -417,7 +417,7 @@ public final class FieldInfos implements Iterable<FieldInfo> {
|
|||
|
||||
public boolean hasNorms() {
|
||||
for (FieldInfo fi : this) {
|
||||
if (!fi.omitNorms) {
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -277,8 +277,6 @@ public class FilterIndexReader extends IndexReader {
|
|||
|
||||
/**
|
||||
* <p>Construct a FilterIndexReader based on the specified base reader.
|
||||
* Directory locking for delete, undeleteAll, and setNorm operations is
|
||||
* left to the base reader.</p>
|
||||
* <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
|
||||
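* <p>A minimal wrapping sketch (illustrative only; the subclass name and the choice of
* overridden method are assumptions, not part of this class):
* <pre>
*   class NoDeletesReader extends FilterIndexReader {
*     NoDeletesReader(IndexReader in) { super(in); }
*     public boolean hasDeletions() { return false; } // hide deletions; all other calls delegate to "in"
*   }
* </pre>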
* @param in specified base reader.
|
||||
*/
|
||||
|
@@ -331,9 +329,6 @@ public class FilterIndexReader extends IndexReader {
|
|||
return in.hasDeletions();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
|
||||
|
||||
@Override
|
||||
public boolean hasNorms(String field) throws IOException {
|
||||
ensureOpen();
|
||||
|
@@ -346,30 +341,11 @@ public class FilterIndexReader extends IndexReader {
|
|||
return in.norms(f);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
|
||||
in.setNorm(d, f, b);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(Term t) throws IOException {
|
||||
ensureOpen();
|
||||
return in.docFreq(t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(String field, BytesRef t) throws IOException {
|
||||
ensureOpen();
|
||||
return in.docFreq(field, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doDelete(int n) throws CorruptIndexException, IOException { in.deleteDocument(n); }
|
||||
|
||||
@Override
|
||||
protected void doCommit(Map<String,String> commitUserData) throws IOException {
|
||||
in.commit(commitUserData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
|
@@ -450,4 +426,14 @@ public class FilterIndexReader extends IndexReader {
|
|||
ensureOpen();
|
||||
return in.perDocValues();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexCommit getIndexCommit() throws IOException {
|
||||
return in.getIndexCommit();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTermInfosIndexDivisor() {
|
||||
return in.getTermInfosIndexDivisor();
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -179,7 +179,7 @@ final class IndexFileDeleter {
|
|||
}
|
||||
sis = null;
|
||||
} catch (IOException e) {
|
||||
if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen) {
|
||||
if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen && directory.fileLength(fileName) > 0) {
|
||||
throw e;
|
||||
} else {
|
||||
// Most likely we are opening an index that
|
||||
|
|
|
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.lucene.index.codecs.Codec; // for javadocs
|
||||
|
||||
// TODO: put all files under codec and remove all the static extensions here
|
||||
|
@@ -49,9 +47,6 @@ public final class IndexFileNames {
|
|||
|
||||
/** Name of the generation reference file name */
|
||||
public static final String SEGMENTS_GEN = "segments." + GEN_EXTENSION;
|
||||
|
||||
/** Extension of norms file */
|
||||
public static final String NORMS_EXTENSION = "nrm";
|
||||
|
||||
/** Extension of compound file */
|
||||
public static final String COMPOUND_FILE_EXTENSION = "cfs";
|
||||
|
@@ -65,9 +60,6 @@ public final class IndexFileNames {
|
|||
/** Extension of deletes */
|
||||
public static final String DELETES_EXTENSION = "del";
|
||||
|
||||
/** Extension of separate norms */
|
||||
public static final String SEPARATE_NORMS_EXTENSION = "s";
|
||||
|
||||
/**
|
||||
* This array contains all filename extensions used by
|
||||
* Lucene's index files, with one exception, namely the
|
||||
|
@@ -80,14 +72,9 @@ public final class IndexFileNames {
|
|||
COMPOUND_FILE_ENTRIES_EXTENSION,
|
||||
DELETES_EXTENSION,
|
||||
GEN_EXTENSION,
|
||||
NORMS_EXTENSION,
|
||||
COMPOUND_FILE_STORE_EXTENSION,
|
||||
};
|
||||
|
||||
public static final String[] NON_STORE_INDEX_EXTENSIONS = new String[] {
|
||||
NORMS_EXTENSION
|
||||
};
|
||||
|
||||
/**
|
||||
* Computes the full file name from base, extension and generation. If the
|
||||
* generation is -1, the file name is null. If it's 0, the file name is
|
||||
|
@@ -188,17 +175,5 @@ public final class IndexFileNames {
|
|||
filename = filename.substring(0, idx);
|
||||
}
|
||||
return filename;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the given filename ends with the separate norms file
|
||||
* pattern: {@code SEPARATE_NORMS_EXTENSION + "[0-9]+"}.
|
||||
*/
|
||||
public static boolean isSeparateNormsFile(String filename) {
|
||||
int idx = filename.lastIndexOf('.');
|
||||
if (idx == -1) return false;
|
||||
String ext = filename.substring(idx + 1);
|
||||
return Pattern.matches(SEPARATE_NORMS_EXTENSION + "[0-9]+", ext);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -32,7 +32,6 @@ import org.apache.lucene.index.codecs.PerDocValues;
|
|||
import org.apache.lucene.index.values.IndexDocValues;
|
||||
import org.apache.lucene.search.FieldCache; // javadocs
|
||||
import org.apache.lucene.search.SearcherManager; // javadocs
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.store.*;
|
||||
import org.apache.lucene.util.ArrayUtil;
|
||||
import org.apache.lucene.util.Bits;
|
||||
|
@@ -45,7 +44,7 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
|
|||
|
||||
<p> Concrete subclasses of IndexReader are usually constructed with a call to
|
||||
one of the static <code>open()</code> methods, e.g. {@link
|
||||
#open(Directory, boolean)}.
|
||||
#open(Directory)}.
|
||||
|
||||
<p> For efficiency, in this API documents are often referred to via
|
||||
<i>document numbers</i>, non-negative integers which each name a unique
|
||||
|
@@ -53,9 +52,6 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
|
|||
as documents are added to and deleted from an index. Clients should thus not
|
||||
rely on a given document having the same number between sessions.
|
||||
|
||||
<p> An IndexReader can be opened on a directory for which an IndexWriter is
|
||||
opened already, but it cannot be used to delete documents from the index then.
|
||||
|
||||
<p>
|
||||
<b>NOTE</b>: for backwards API compatibility, several methods are not listed
|
||||
as abstract, but have no useful implementations in this base class and
|
||||
|
@@ -66,13 +62,6 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
|
|||
|
||||
<p>
|
||||
|
||||
<b>NOTE</b>: as of 2.4, it's possible to open a read-only
|
||||
IndexReader using the static open methods that accept the
|
||||
boolean readOnly parameter. Such a reader may have better
|
||||
concurrency. You must specify false if you want to
|
||||
make changes with the resulting IndexReader.
|
||||
</p>
|
||||
|
||||
<a name="thread-safety"></a><p><b>NOTE</b>: {@link
|
||||
IndexReader} instances are completely thread
|
||||
safe, meaning multiple threads can call any of its methods,
|
||||
|
@@ -175,14 +164,13 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
}
|
||||
|
||||
private volatile boolean closed;
|
||||
protected boolean hasChanges;
|
||||
|
||||
private final AtomicInteger refCount = new AtomicInteger();
|
||||
|
||||
static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
|
||||
|
||||
/** Expert: returns the current refCount for this reader */
|
||||
public int getRefCount() {
|
||||
public final int getRefCount() {
|
||||
return refCount.get();
|
||||
}
|
||||
|
||||
|
@ -201,7 +189,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* @see #decRef
|
||||
* @see #tryIncRef
|
||||
*/
|
||||
public void incRef() {
|
||||
public final void incRef() {
|
||||
ensureOpen();
|
||||
refCount.incrementAndGet();
|
||||
}
|
||||
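// Usage sketch (illustrative): a thread that borrows a shared reader pins it with
// incRef() and releases it with decRef(), so a concurrent close() elsewhere cannot
// free it mid-search ("reader" is an assumed shared IndexReader):
//
//   reader.incRef();
//   try {
//     // ... search against reader ...
//   } finally {
//     reader.decRef();   // the final decRef actually closes the reader
//   }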
|
@ -229,7 +217,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* @see #decRef
|
||||
* @see #incRef
|
||||
*/
|
||||
public boolean tryIncRef() {
|
||||
public final boolean tryIncRef() {
|
||||
int count;
|
||||
while ((count = refCount.get()) > 0) {
|
||||
if (refCount.compareAndSet(count, count+1)) {
|
||||
|
@ -243,9 +231,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder buffer = new StringBuilder();
|
||||
if (hasChanges) {
|
||||
buffer.append('*');
|
||||
}
|
||||
buffer.append(getClass().getSimpleName());
|
||||
buffer.append('(');
|
||||
final IndexReader[] subReaders = getSequentialSubReaders();
|
||||
|
@ -270,13 +255,12 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
*
|
||||
* @see #incRef
|
||||
*/
|
||||
public void decRef() throws IOException {
|
||||
public final void decRef() throws IOException {
|
||||
ensureOpen();
|
||||
final int rc = refCount.getAndDecrement();
|
||||
if (rc == 1) {
|
||||
boolean success = false;
|
||||
try {
|
||||
commit();
|
||||
doClose();
|
||||
success = true;
|
||||
} finally {
|
||||
|
@ -305,27 +289,33 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
}
|
||||
|
||||
/** Returns an IndexReader reading the index in the given
|
||||
* Directory, with readOnly=true.
|
||||
* Directory
|
||||
* @param directory the index directory
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final Directory directory) throws CorruptIndexException, IOException {
|
||||
return open(directory, null, null, true, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
return DirectoryReader.open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
}
|
||||
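// Usage sketch (illustrative; the index path is an assumption):
//
//   Directory dir = FSDirectory.open(new File("/path/to/index"));
//   IndexReader reader = IndexReader.open(dir);   // readers are always read-only now
//   try {
//     IndexSearcher searcher = new IndexSearcher(reader);
//     // ... run queries ...
//   } finally {
//     reader.close();
//     dir.close();
//   }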
|
||||
/** Returns an IndexReader reading the index in the given
|
||||
* Directory. You should pass readOnly=true, since it
|
||||
* gives much better concurrent performance, unless you
|
||||
* intend to do write operations (delete documents or
|
||||
* change norms) with the reader.
|
||||
|
||||
/** Expert: Returns an IndexReader reading the index in the given
|
||||
* Directory with the given termInfosIndexDivisor.
|
||||
* @param directory the index directory
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
* @param termInfosIndexDivisor Subsamples which indexed
|
||||
* terms are loaded into RAM. This has the same effect as {@link
|
||||
* IndexWriterConfig#setTermIndexInterval} except that setting
|
||||
* must be done at indexing time while this setting can be
|
||||
* set per reader. When set to N, then one in every
|
||||
* N*termIndexInterval terms in the index is loaded into
|
||||
* memory. By setting this to a value > 1 you can reduce
|
||||
* memory usage, at the expense of higher latency when
|
||||
* loading a TermInfo. The default value is 1. Set this
|
||||
* to -1 to skip loading the terms index entirely.
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final Directory directory, boolean readOnly) throws CorruptIndexException, IOException {
|
||||
return open(directory, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
public static IndexReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return DirectoryReader.open(directory, null, termInfosIndexDivisor);
|
||||
}
|
||||
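// Worked example (illustrative): with a termIndexInterval of 128 (the longstanding default),
// a divisor of 4 keeps roughly one of every 4*128 = 512 indexed terms in RAM, cutting the
// terms-index memory to about a quarter at the cost of slower term lookups:
//
//   IndexReader reader = IndexReader.open(dir, 4);   // "dir" is an assumed Directory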
|
||||
/**
|
||||
|
@ -352,48 +342,19 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
}
|
||||
|
||||
/** Expert: returns an IndexReader reading the index in the given
|
||||
* {@link IndexCommit}. You should pass readOnly=true, since it
|
||||
* gives much better concurrent performance, unless you
|
||||
* intend to do write operations (delete documents or
|
||||
* change norms) with the reader.
|
||||
* {@link IndexCommit}.
|
||||
* @param commit the commit point to open
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final IndexCommit commit, boolean readOnly) throws CorruptIndexException, IOException {
|
||||
return open(commit.getDirectory(), null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
public static IndexReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
|
||||
return DirectoryReader.open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
}
|
||||
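// Usage sketch (illustrative): open a point-in-time snapshot of the index. This assumes the
// index was written with a deletion policy that keeps older commits; chooseCommit() is a
// hypothetical filter, not a Lucene API:
//
//   for (IndexCommit commit : IndexReader.listCommits(dir)) {
//     if (chooseCommit(commit)) {
//       IndexReader snapshot = IndexReader.open(commit);
//       // ... search the snapshot ...
//       snapshot.close();
//     }
//   }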
|
||||
/** Expert: returns an IndexReader reading the index in
|
||||
* the given Directory, with a custom {@link
|
||||
* IndexDeletionPolicy}. You should pass readOnly=true,
|
||||
* since it gives much better concurrent performance,
|
||||
* unless you intend to do write operations (delete
|
||||
* documents or change norms) with the reader.
|
||||
* @param directory the index directory
|
||||
* @param deletionPolicy a custom deletion policy (only used
|
||||
* if you use this reader to perform deletes or to set
|
||||
* norms); see {@link IndexWriter} for details.
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final Directory directory, IndexDeletionPolicy deletionPolicy, boolean readOnly) throws CorruptIndexException, IOException {
|
||||
return open(directory, deletionPolicy, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
}
|
||||
|
||||
/** Expert: returns an IndexReader reading the index in
|
||||
* the given Directory, with a custom {@link
|
||||
* IndexDeletionPolicy}. You should pass readOnly=true,
|
||||
* since it gives much better concurrent performance,
|
||||
* unless you intend to do write operations (delete
|
||||
* documents or change norms) with the reader.
|
||||
* @param directory the index directory
|
||||
* @param deletionPolicy a custom deletion policy (only used
|
||||
* if you use this reader to perform deletes or to set
|
||||
* norms); see {@link IndexWriter} for details.
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
/** Expert: returns an IndexReader reading the index in the given
|
||||
* {@link IndexCommit} and termInfosIndexDivisor.
|
||||
* @param commit the commit point to open
|
||||
* @param termInfosIndexDivisor Subsamples which indexed
|
||||
* terms are loaded into RAM. This has the same effect as {@link
|
||||
* IndexWriterConfig#setTermIndexInterval} except that setting
|
||||
|
@ -407,65 +368,8 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final Directory directory, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return open(directory, deletionPolicy, null, readOnly, termInfosIndexDivisor);
|
||||
}
|
||||
|
||||
/** Expert: returns an IndexReader reading the index in
|
||||
* the given Directory, using a specific commit and with
|
||||
* a custom {@link IndexDeletionPolicy}. You should pass
|
||||
* readOnly=true, since it gives much better concurrent
|
||||
* performance, unless you intend to do write operations
|
||||
* (delete documents or change norms) with the reader.
|
||||
* @param commit the specific {@link IndexCommit} to open;
|
||||
* see {@link IndexReader#listCommits} to list all commits
|
||||
* in a directory
|
||||
* @param deletionPolicy a custom deletion policy (only used
|
||||
* if you use this reader to perform deletes or to set
|
||||
* norms); see {@link IndexWriter} for details.
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final IndexCommit commit, IndexDeletionPolicy deletionPolicy, boolean readOnly) throws CorruptIndexException, IOException {
|
||||
return open(commit.getDirectory(), deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
}
|
||||
|
||||
/** Expert: returns an IndexReader reading the index in
|
||||
* the given Directory, using a specific commit and with
|
||||
* a custom {@link IndexDeletionPolicy}. You should pass
|
||||
* readOnly=true, since it gives much better concurrent
|
||||
* performance, unless you intend to do write operations
|
||||
* (delete documents or change norms) with the reader.
|
||||
* @param commit the specific {@link IndexCommit} to open;
|
||||
* see {@link IndexReader#listCommits} to list all commits
|
||||
* in a directory
|
||||
* @param deletionPolicy a custom deletion policy (only used
|
||||
* if you use this reader to perform deletes or to set
|
||||
* norms); see {@link IndexWriter} for details.
|
||||
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
|
||||
* @param termInfosIndexDivisor Subsamples which indexed
|
||||
* terms are loaded into RAM. This has the same effect as {@link
|
||||
* IndexWriterConfig#setTermIndexInterval} except that setting
|
||||
* must be done at indexing time while this setting can be
|
||||
* set per reader. When set to N, then one in every
|
||||
* N*termIndexInterval terms in the index is loaded into
|
||||
* memory. By setting this to a value > 1 you can reduce
|
||||
* memory usage, at the expense of higher latency when
|
||||
* loading a TermInfo. The default value is 1. Set this
|
||||
* to -1 to skip loading the terms index entirely. This is only useful in
|
||||
* advanced situations when you will only .next() through all terms;
|
||||
* attempts to seek will hit an exception.
|
||||
*
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static IndexReader open(final IndexCommit commit, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return open(commit.getDirectory(), deletionPolicy, commit, readOnly, termInfosIndexDivisor);
|
||||
}
|
||||
|
||||
private static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return DirectoryReader.open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
|
||||
public static IndexReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||
return DirectoryReader.open(commit.getDirectory(), commit, termInfosIndexDivisor);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -487,11 +391,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* reader while other threads are still using it; see
|
||||
* {@link SearcherManager} to simplify managing this.
|
||||
*
|
||||
* <p>If a new reader is returned, it's safe to make changes
|
||||
* (deletions, norms) with it. All shared mutable state
|
||||
* with the old reader uses "copy on write" semantics to
|
||||
* ensure the changes are not seen by other readers.
|
||||
*
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
* @return null if there are no changes; else, a new
|
||||
|
@ -503,29 +402,13 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
return newReader;
|
||||
}
|
||||
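// Usage sketch (illustrative): the common refresh idiom; the old reader stays usable
// until every thread that borrowed it has released it:
//
//   IndexReader newReader = IndexReader.openIfChanged(reader);
//   if (newReader != null) {       // null means the index has not changed
//     reader.close();              // or decRef() if other threads still hold a reference
//     reader = newReader;
//   }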
|
||||
/**
|
||||
* If the index has changed since the provided reader was
|
||||
* opened, open and return a new reader, with the
|
||||
* specified <code>readOnly</code>; else, return
|
||||
* null.
|
||||
*
|
||||
* @see #openIfChanged(IndexReader)
|
||||
*/
|
||||
public static IndexReader openIfChanged(IndexReader oldReader, boolean readOnly) throws IOException {
|
||||
final IndexReader newReader = oldReader.doOpenIfChanged(readOnly);
|
||||
assert newReader != oldReader;
|
||||
return newReader;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the IndexCommit differs from what the
|
||||
* provided reader is searching, or the provided reader is
|
||||
* not already read-only, open and return a new
|
||||
* <code>readOnly=true</code> reader; else, return null.
|
||||
* provided reader is searching, open and return a new
|
||||
* reader; else, return null.
|
||||
*
|
||||
* @see #openIfChanged(IndexReader)
|
||||
*/
|
||||
// TODO: should you be able to specify readOnly?
|
||||
public static IndexReader openIfChanged(IndexReader oldReader, IndexCommit commit) throws IOException {
|
||||
final IndexReader newReader = oldReader.doOpenIfChanged(commit);
|
||||
assert newReader != oldReader;
|
||||
|
@ -535,7 +418,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
/**
|
||||
* Expert: If there are changes (committed or not) in the
|
||||
* {@link IndexWriter} versus what the provided reader is
|
||||
* searching, then open and return a new read-only
|
||||
* searching, then open and return a new
|
||||
* IndexReader searching both committed and uncommitted
|
||||
* changes from the writer; else, return null (though, the
|
||||
* current implementation never returns null).
|
||||
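*
* <p>A typical refresh step, sketched for illustration (the reader was previously opened
* against the same writer; applyAllDeletes=true is an assumed choice):
* <pre>
*   IndexReader newReader = IndexReader.openIfChanged(reader, writer, true);
*   if (newReader != null) {   // null would mean the writer has nothing new
*     reader.close();
*     reader = newReader;
*   }
* </pre>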
|
@ -609,16 +492,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
throw new UnsupportedOperationException("This reader does not support reopen().");
|
||||
}
|
||||
|
||||
/**
|
||||
* If the index has changed since it was opened, open and return a new reader;
|
||||
* else, return {@code null}.
|
||||
*
|
||||
* @see #openIfChanged(IndexReader, boolean)
|
||||
*/
|
||||
protected IndexReader doOpenIfChanged(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
throw new UnsupportedOperationException("This reader does not support reopen().");
|
||||
}
|
||||
|
||||
/**
|
||||
* If the index has changed since it was opened, open and return a new reader;
|
||||
* else, return {@code null}.
|
||||
|
@ -642,34 +515,11 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
/**
|
||||
* Efficiently clones the IndexReader (sharing most
|
||||
* internal state).
|
||||
* <p>
|
||||
* On cloning a reader with pending changes (deletions,
|
||||
* norms), the original reader transfers its write lock to
|
||||
* the cloned reader. This means only the cloned reader
|
||||
* may make further changes to the index, and commit the
|
||||
* changes to the index on close, but the old reader still
|
||||
* reflects all changes made up until it was cloned.
|
||||
* <p>
|
||||
* Like {@link #openIfChanged(IndexReader)}, it's safe to make changes to
|
||||
* either the original or the cloned reader: all shared
|
||||
* mutable state obeys "copy on write" semantics to ensure
|
||||
* the changes are not seen by other readers.
|
||||
* <p>
|
||||
*/
|
||||
@Override
|
||||
public synchronized Object clone() {
|
||||
throw new UnsupportedOperationException("This reader does not implement clone()");
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the IndexReader and optionally changes readOnly. A readOnly
|
||||
* reader cannot open a writeable reader.
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
throw new UnsupportedOperationException("This reader does not implement clone()");
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the directory associated with this index. The Default
|
||||
|
@ -807,14 +657,14 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* term vectors were not indexed. The returned Fields
|
||||
* instance acts like a single-document inverted index
|
||||
* (the docID will be 0). */
|
||||
abstract public Fields getTermVectors(int docID)
|
||||
public abstract Fields getTermVectors(int docID)
|
||||
throws IOException;
|
||||
|
||||
/** Retrieve term vector for this document and field, or
|
||||
* null if term vectors were not indexed. The returned
|
||||
* Fields instance acts like a single-document inverted
|
||||
* index (the docID will be 0). */
|
||||
public Terms getTermVector(int docID, String field)
|
||||
public final Terms getTermVector(int docID, String field)
|
||||
throws IOException {
|
||||
Fields vectors = getTermVectors(docID);
|
||||
if (vectors == null) {
|
||||
|
@ -848,7 +698,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
public abstract int maxDoc();
|
||||
|
||||
/** Returns the number of deleted documents. */
|
||||
public int numDeletedDocs() {
|
||||
public final int numDeletedDocs() {
|
||||
return maxDoc() - numDocs();
|
||||
}
|
||||
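// Worked example (illustrative): a reader over an index that has received 10 documents
// (maxDoc() == 10), 3 of which are marked deleted but not yet merged away, reports
// numDocs() == 7 and numDeletedDocs() == 10 - 7 == 3.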
|
||||
|
@ -881,7 +731,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
// TODO: we need a separate StoredField, so that the
|
||||
// Document returned here contains that class not
|
||||
// IndexableField
|
||||
public Document document(int docID) throws CorruptIndexException, IOException {
|
||||
public final Document document(int docID) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
if (docID < 0 || docID >= maxDoc()) {
|
||||
throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc() + " (got docID=" + docID + ")");
|
||||
|
@ -910,39 +760,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
*/
|
||||
public abstract byte[] norms(String field) throws IOException;
|
||||
|
||||
/** Expert: Resets the normalization factor for the named field of the named
|
||||
* document. By default, the norm represents the product of the field's {@link
|
||||
* org.apache.lucene.document.Field#setBoost(float) boost} and its
|
||||
* length normalization}. Thus, to preserve the length normalization
|
||||
* values when resetting this, one should base the new value upon the old.
|
||||
*
|
||||
* <b>NOTE:</b> If this field does not index norms, then
|
||||
* this method throws {@link IllegalStateException}.
|
||||
*
|
||||
* @see #norms(String)
|
||||
* @see Similarity#computeNorm(FieldInvertState)
|
||||
* @see org.apache.lucene.search.similarities.DefaultSimilarity#decodeNormValue(byte)
|
||||
* @throws StaleReaderException if the index has changed
|
||||
* since this reader was opened
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if there is a low-level IO error
|
||||
* @throws IllegalStateException if the field does not index norms
|
||||
*/
|
||||
public synchronized void setNorm(int doc, String field, byte value)
|
||||
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
|
||||
ensureOpen();
|
||||
acquireWriteLock();
|
||||
hasChanges = true;
|
||||
doSetNorm(doc, field, value);
|
||||
}
|
||||
|
||||
/** Implements setNorm in subclass.*/
|
||||
protected abstract void doSetNorm(int doc, String field, byte value)
|
||||
throws CorruptIndexException, IOException;
|
||||
|
||||
/**
|
||||
* Returns {@link Fields} for this reader.
|
||||
* This method may return null if the reader has no
|
||||
|
@ -973,7 +790,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* through them yourself. */
|
||||
public abstract PerDocValues perDocValues() throws IOException;
|
||||
|
||||
public int docFreq(Term term) throws IOException {
|
||||
public final int docFreq(Term term) throws IOException {
|
||||
return docFreq(term.field(), term.bytes());
|
||||
}
|
||||
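// Usage sketch (illustrative; the field and term text are assumptions):
//
//   int df = reader.docFreq(new Term("title", "lucene"));   // docs containing the term;
//                                                           // not-yet-merged deletes still count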
|
||||
|
@ -1004,7 +821,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* field does not exist. This method does not take into
|
||||
* account deleted documents that have not yet been merged
|
||||
* away. */
|
||||
public long totalTermFreq(String field, BytesRef term) throws IOException {
|
||||
public final long totalTermFreq(String field, BytesRef term) throws IOException {
|
||||
final Fields fields = fields();
|
||||
if (fields == null) {
|
||||
return 0;
|
||||
|
@ -1022,7 +839,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
}
|
||||
|
||||
/** This may return null if the field does not exist.*/
|
||||
public Terms terms(String field) throws IOException {
|
||||
public final Terms terms(String field) throws IOException {
|
||||
final Fields fields = fields();
|
||||
if (fields == null) {
|
||||
return null;
|
||||
|
@ -1033,7 +850,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
/** Returns {@link DocsEnum} for the specified field &
|
||||
* term. This may return null, if either the field or
|
||||
* term does not exist. */
|
||||
public DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
|
||||
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
|
||||
assert field != null;
|
||||
assert term != null;
|
||||
final Fields fields = fields();
|
||||
|
@ -1053,7 +870,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* field & term. This may return null, if either the
|
||||
* field or term does not exist, or, positions were not
|
||||
* indexed for this field. */
|
||||
public DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term) throws IOException {
|
||||
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term) throws IOException {
|
||||
assert field != null;
|
||||
assert term != null;
|
||||
final Fields fields = fields();
|
||||
|
@ -1074,7 +891,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* {@link TermState}. This may return null, if either the field or the term
|
||||
* does not exist or the {@link TermState} is invalid for the underlying
|
||||
* implementation.*/
|
||||
public DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
|
||||
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
|
||||
assert state != null;
|
||||
assert field != null;
|
||||
final Fields fields = fields();
|
||||
|
@ -1094,7 +911,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* {@link TermState}. This may return null, if either the field or the term
|
||||
* does not exist, the {@link TermState} is invalid for the underlying
|
||||
* implementation, or positions were not indexed for this field. */
|
||||
public DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state) throws IOException {
|
||||
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state) throws IOException {
|
||||
assert state != null;
|
||||
assert field != null;
|
||||
final Fields fields = fields();
|
||||
|
@ -1109,159 +926,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
return null;
|
||||
}
|
||||
|
||||
|
||||
/** Deletes the document numbered <code>docNum</code>. Once a document is
|
||||
* deleted it will not appear in TermDocs or TermPositions enumerations.
|
||||
* Attempts to read its field with the {@link #document}
|
||||
* method will result in an error. The presence of this document may still be
|
||||
* reflected in the {@link #docFreq} statistic, though
|
||||
* this will be corrected eventually as the index is further modified.
|
||||
*
|
||||
* @throws StaleReaderException if the index has changed
|
||||
* since this reader was opened
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
|
||||
ensureOpen();
|
||||
acquireWriteLock();
|
||||
hasChanges = true;
|
||||
doDelete(docNum);
|
||||
}
|
||||
|
||||
|
||||
/** Implements deletion of the document numbered <code>docNum</code>.
|
||||
* Applications should call {@link #deleteDocument(int)} or {@link #deleteDocuments(Term)}.
|
||||
*/
|
||||
protected abstract void doDelete(int docNum) throws CorruptIndexException, IOException;
|
||||
|
||||
|
||||
/** Deletes all documents that have a given <code>term</code> indexed.
|
||||
* This is useful if one uses a document field to hold a unique ID string for
|
||||
* the document. Then to delete such a document, one merely constructs a
|
||||
* term with the appropriate field and the unique ID string as its text and
|
||||
* passes it to this method.
|
||||
* See {@link #deleteDocument(int)} for information about when this deletion will
|
||||
* become effective.
|
||||
*
|
||||
* @return the number of documents deleted
|
||||
* @throws StaleReaderException if the index has changed
|
||||
* since this reader was opened
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
|
||||
ensureOpen();
|
||||
DocsEnum docs = MultiFields.getTermDocsEnum(this,
|
||||
MultiFields.getLiveDocs(this),
|
||||
term.field(),
|
||||
term.bytes(),
|
||||
false);
|
||||
if (docs == null) {
|
||||
return 0;
|
||||
}
|
||||
int n = 0;
|
||||
int doc;
|
||||
while ((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
|
||||
deleteDocument(doc);
|
||||
n++;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
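// Sketch of the same delete-by-unique-key operation performed through IndexWriter
// (illustrative; dir, analyzer, matchVersion and the "id" field are assumptions):
//
//   IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(matchVersion, analyzer));
//   writer.deleteDocuments(new Term("id", "doc-42"));   // delete by unique key
//   writer.commit();
//   writer.close();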
|
||||
/** Undeletes all documents currently marked as deleted in
|
||||
* this index.
|
||||
*
|
||||
* <p>NOTE: this method can only recover documents marked
|
||||
* for deletion but not yet removed from the index; when
|
||||
* and how Lucene removes deleted documents is an
|
||||
* implementation detail, subject to change from release
|
||||
* to release. However, you can use {@link
|
||||
* #numDeletedDocs} on the current IndexReader instance to
|
||||
* see how many documents will be un-deleted.
|
||||
*
|
||||
* @throws StaleReaderException if the index has changed
|
||||
* since this reader was opened
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
|
||||
ensureOpen();
|
||||
acquireWriteLock();
|
||||
hasChanges = true;
|
||||
doUndeleteAll();
|
||||
}
|
||||
|
||||
/** Implements actual undeleteAll() in subclass. */
|
||||
protected abstract void doUndeleteAll() throws CorruptIndexException, IOException;
|
||||
|
||||
/** Does nothing by default. Subclasses that require a write lock for
|
||||
* index modifications must implement this method. */
|
||||
protected synchronized void acquireWriteLock() throws IOException {
|
||||
/* NOOP */
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
public final synchronized void flush() throws IOException {
|
||||
ensureOpen();
|
||||
commit();
|
||||
}
|
||||
|
||||
/**
|
||||
* @param commitUserData Opaque Map (String -> String)
|
||||
* that's recorded into the segments file in the index,
|
||||
* and retrievable by {@link
|
||||
* IndexReader#getCommitUserData}.
|
||||
* @throws IOException
|
||||
*/
|
||||
public final synchronized void flush(Map<String, String> commitUserData) throws IOException {
|
||||
ensureOpen();
|
||||
commit(commitUserData);
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit changes resulting from delete, undeleteAll, or
|
||||
* setNorm operations
|
||||
*
|
||||
* If an exception is hit, then either no changes or all
|
||||
* changes will have been committed to the index
|
||||
* (transactional semantics).
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
protected final synchronized void commit() throws IOException {
|
||||
commit(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit changes resulting from delete, undeleteAll, or
|
||||
* setNorm operations
|
||||
*
|
||||
* If an exception is hit, then either no changes or all
|
||||
* changes will have been committed to the index
|
||||
* (transactional semantics).
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public final synchronized void commit(Map<String, String> commitUserData) throws IOException {
|
||||
// Don't call ensureOpen since we commit() on close
|
||||
doCommit(commitUserData);
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
/** Implements commit. */
|
||||
protected abstract void doCommit(Map<String, String> commitUserData) throws IOException;
|
||||
|
||||
/**
|
||||
* Closes files associated with this index.
|
||||
* Also saves any new deletions to disk.
|
||||
|
@ -1397,7 +1061,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* one commit point. But if you're using a custom {@link
|
||||
* IndexDeletionPolicy} then there could be many commits.
|
||||
* Once you have a given commit, you can open a reader on
|
||||
* it by calling {@link IndexReader#open(IndexCommit,boolean)}
|
||||
* it by calling {@link IndexReader#open(IndexCommit)}.
|
||||
* There must be at least one commit in
|
||||
* the Directory, else this method throws {@link
|
||||
* IndexNotFoundException}. Note that if a commit is in
|
||||
|
@ -1418,7 +1082,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* that has no sub readers).
|
||||
* <p>
|
||||
* NOTE: You should not try using sub-readers returned by
|
||||
* this method to make any changes (setNorm, deleteDocument,
|
||||
* this method to make any changes (deleteDocument,
|
||||
* etc.). While this might succeed for one composite reader
|
||||
* (like MultiReader), it will most likely lead to index
|
||||
* corruption for other readers (like DirectoryReader obtained
|
||||
|
@ -1444,12 +1108,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* top-level context holds a <code>null</code> {@link CompositeReaderContext#leaves}
|
||||
* reference. Only the top-level context maintains the convenience leaf-view
|
||||
* for performance reasons.
|
||||
* <p>
|
||||
* NOTE: You should not try using sub-readers returned by this method to make
|
||||
* any changes (setNorm, deleteDocument, etc.). While this might succeed for
|
||||
* one composite reader (like MultiReader), it will most likely lead to index
|
||||
* corruption for other readers (like DirectoryReader obtained through
|
||||
* {@link #open}. Use the top-level context's reader directly.
|
||||
*
|
||||
* @lucene.experimental
|
||||
*/
|
||||
|
@ -1470,7 +1128,10 @@ public abstract class IndexReader implements Cloneable,Closeable {
|
|||
* Instead, you should call {@link
|
||||
* #getSequentialSubReaders} and ask each sub reader for
|
||||
* its unique term count. */
|
||||
public long getUniqueTermCount() throws IOException {
|
||||
public final long getUniqueTermCount() throws IOException {
|
||||
if (!getTopReaderContext().isAtomic) {
|
||||
return -1;
|
||||
}
|
||||
final Fields fields = fields();
|
||||
if (fields == null) {
|
||||
return 0;
|
||||
|
|
|
@@ -30,6 +30,7 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.index.DocumentsWriterPerThread.FlushedSegment;
|
||||
|
@@ -618,7 +619,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
|
|||
SegmentReader sr = ent.getValue();
|
||||
if (sr.hasChanges) {
|
||||
assert infoIsLive(sr.getSegmentInfo(), "key=" + ent.getKey());
|
||||
sr.doCommit(null);
|
||||
sr.doCommit();
|
||||
|
||||
// Must checkpoint w/ deleter, because this
|
||||
// segment reader will have created new _X_N.del
|
||||
|
@ -650,7 +651,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
|
|||
final SegmentReader sr = readerMap.get(new SegmentCacheKey(info, IOContext.Context.READ));
|
||||
if (sr != null && sr.hasChanges) {
|
||||
assert infoIsLive(info);
|
||||
sr.doCommit(null);
|
||||
sr.doCommit();
|
||||
// Must checkpoint w/ deleter, because this
|
||||
// segment reader will have created new _X_N.del
|
||||
// file.
|
||||
|
@ -697,7 +698,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
|
|||
// TODO: we may want to avoid doing this while
|
||||
// synchronized
|
||||
// Returns a ref, which we xfer to readerMap:
|
||||
sr = SegmentReader.get(false, info.dir, info, doOpenStores, context.context == IOContext.Context.MERGE ? -1 : config.getReaderTermsIndexDivisor(), context);
|
||||
sr = SegmentReader.getRW(info, doOpenStores, context.context == IOContext.Context.MERGE ? -1 : config.getReaderTermsIndexDivisor(), context);
|
||||
sr.readerFinishedListeners = readerFinishedListeners;
|
||||
|
||||
if (info.dir == directory) {
|
||||
|
@ -3980,7 +3981,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
|
|||
for (String file : files) {
|
||||
assert !IndexFileNames.matchesExtension(file, IndexFileNames.DELETES_EXTENSION)
|
||||
: ".del file is not allowed in .cfs: " + file;
|
||||
assert !IndexFileNames.isSeparateNormsFile(file)
|
||||
assert !isSeparateNormsFile(file)
|
||||
: "separate norms file (.s[0-9]+) is not allowed in .cfs: " + file;
|
||||
directory.copy(cfsDir, file, file, context);
|
||||
checkAbort.work(directory.fileLength(file));
|
||||
|
@ -3991,4 +3992,18 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
|
|||
|
||||
return files;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns true if the given filename ends with the separate norms file
|
||||
* pattern: {@code SEPARATE_NORMS_EXTENSION + "[0-9]+"}.
|
||||
* @deprecated only for asserting
|
||||
*/
|
||||
@Deprecated
|
||||
private static boolean isSeparateNormsFile(String filename) {
|
||||
int idx = filename.lastIndexOf('.');
|
||||
if (idx == -1) return false;
|
||||
String ext = filename.substring(idx + 1);
|
||||
return Pattern.matches("s[0-9]+", ext);
|
||||
}
|
||||
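// Example (illustrative): "_3.s7" ends in the separate-norms pattern and returns true,
// while "_3.del" and "_3.nrm" return false.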
}
|
||||
|
|
|
@@ -41,8 +41,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
|
||||
/**
|
||||
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
|
||||
* Directory locking for delete, undeleteAll, and setNorm operations is
|
||||
* left to the subreaders. </p>
|
||||
* <p>Note that all subreaders are closed if this MultiReader is closed.</p>
|
||||
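* <p>A minimal usage sketch (illustrative; r1 and r2 are assumed to be open IndexReaders):
* <pre>
*   IndexReader multi = new MultiReader(r1, r2);
*   // ... search across both sub-readers ...
*   multi.close();  // also closes r1 and r2
* </pre>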
* @param subReaders set of (sub)readers
|
||||
*/
|
||||
|
@ -52,8 +50,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
|
||||
/**
|
||||
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
|
||||
* Directory locking for delete, undeleteAll, and setNorm operations is
|
||||
* left to the subreaders. </p>
|
||||
* @param closeSubReaders indicates whether the subreaders should be closed
|
||||
* when this MultiReader is closed
|
||||
* @param subReaders set of (sub)readers
|
||||
|
@ -86,11 +82,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
return ReaderUtil.buildReaderContext(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getUniqueTermCount() throws IOException {
|
||||
throw new UnsupportedOperationException("");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Fields fields() throws IOException {
|
||||
throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
|
||||
|
@ -243,23 +234,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
return hasDeletions;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doDelete(int n) throws CorruptIndexException, IOException {
|
||||
numDocs = -1; // invalidate cache
|
||||
int i = readerIndex(n); // find segment num
|
||||
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
|
||||
hasDeletions = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUndeleteAll() throws CorruptIndexException, IOException {
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
subReaders[i].undeleteAll();
|
||||
|
||||
hasDeletions = false;
|
||||
numDocs = -1; // invalidate cache
|
||||
}
|
||||
|
||||
private int readerIndex(int n) { // find reader for doc n:
|
||||
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
|
||||
}
|
||||
|
@ -277,22 +251,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
public synchronized byte[] norms(String field) throws IOException {
|
||||
throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int n, String field, byte value)
|
||||
throws CorruptIndexException, IOException {
|
||||
int i = readerIndex(n); // find segment num
|
||||
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(Term t) throws IOException {
|
||||
ensureOpen();
|
||||
int total = 0; // sum freqs in segments
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
total += subReaders[i].docFreq(t);
|
||||
return total;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(String field, BytesRef t) throws IOException {
|
||||
|
@ -303,12 +261,6 @@ public class MultiReader extends IndexReader implements Cloneable {
|
|||
}
|
||||
return total;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doCommit(Map<String,String> commitUserData) throws IOException {
|
||||
for (int i = 0; i < subReaders.length; i++)
|
||||
subReaders[i].commit(commitUserData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doClose() throws IOException {
|
||||
|
|
|
@@ -21,8 +21,8 @@ import java.io.IOException;
|
|||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.IOContext.Context;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
||||
// TODO FI: norms could actually be stored as doc store
|
||||
|
@ -32,8 +32,12 @@ import org.apache.lucene.util.IOUtils;
|
|||
* merges all of these together into a single _X.nrm file.
|
||||
*/
|
||||
|
||||
final class NormsWriter extends InvertedDocEndConsumer {
|
||||
|
||||
final class NormsConsumer extends InvertedDocEndConsumer {
|
||||
final NormsFormat normsFormat;
|
||||
|
||||
public NormsConsumer(DocumentsWriterPerThread dwpt) {
|
||||
normsFormat = dwpt.codec.normsFormat();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void abort() {}
|
||||
|
@ -49,29 +53,25 @@ final class NormsWriter extends InvertedDocEndConsumer {
|
|||
return;
|
||||
}
|
||||
|
||||
final String normsFileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.NORMS_EXTENSION);
|
||||
IndexOutput normsOut = state.directory.createOutput(normsFileName, state.context);
|
||||
NormsWriter normsOut = null;
|
||||
boolean success = false;
|
||||
try {
|
||||
normsOut.writeBytes(SegmentNorms.NORMS_HEADER, 0, SegmentNorms.NORMS_HEADER.length);
|
||||
|
||||
int normCount = 0;
|
||||
normsOut = normsFormat.normsWriter(state);
|
||||
|
||||
for (FieldInfo fi : state.fieldInfos) {
|
||||
final NormsWriterPerField toWrite = (NormsWriterPerField) fieldsToFlush.get(fi);
|
||||
final NormsConsumerPerField toWrite = (NormsConsumerPerField) fieldsToFlush.get(fi);
|
||||
int upto = 0;
|
||||
// we must check the final value of omitNorms for the fieldinfo, it could have
|
||||
// changed for this field since the first time we added it.
|
||||
if (!fi.omitNorms && toWrite != null && toWrite.upto > 0) {
|
||||
normCount++;
|
||||
|
||||
normsOut.startField(fi);
|
||||
int docID = 0;
|
||||
for (; docID < state.numDocs; docID++) {
|
||||
if (upto < toWrite.upto && toWrite.docIDs[upto] == docID) {
|
||||
normsOut.writeByte(toWrite.norms[upto]);
|
||||
normsOut.writeNorm(toWrite.norms[upto]);
|
||||
upto++;
|
||||
} else {
|
||||
normsOut.writeByte((byte) 0);
|
||||
normsOut.writeNorm((byte) 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -80,14 +80,13 @@ final class NormsWriter extends InvertedDocEndConsumer {
|
|||
|
||||
toWrite.reset();
|
||||
} else if (fi.isIndexed && !fi.omitNorms) {
|
||||
normCount++;
|
||||
// Fill entire field with default norm:
|
||||
normsOut.startField(fi);
|
||||
for(;upto<state.numDocs;upto++)
|
||||
normsOut.writeByte((byte) 0);
|
||||
normsOut.writeNorm((byte) 0);
|
||||
}
|
||||
|
||||
assert 4+normCount*(long)state.numDocs == normsOut.getFilePointer() : ".nrm file size mismatch: expected=" + (4+normCount*(long)state.numDocs) + " actual=" + normsOut.getFilePointer();
|
||||
}
|
||||
normsOut.finish(state.numDocs);
|
||||
success = true;
|
||||
} finally {
|
||||
if (success) {
|
||||
|
@ -107,6 +106,6 @@ final class NormsWriter extends InvertedDocEndConsumer {
|
|||
@Override
|
||||
InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField,
|
||||
FieldInfo fieldInfo) {
|
||||
return new NormsWriterPerField(docInverterPerField, fieldInfo);
|
||||
return new NormsConsumerPerField(docInverterPerField, fieldInfo);
|
||||
}
|
||||
}
|
|
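Editor's note: a hedged sketch of the org.apache.lucene.index.codecs.NormsWriter surface that the NormsConsumer.flush() code above appears to rely on (startField/writeNorm/finish, plus merge() used later in SegmentMerger and close() via IOUtils). This is reconstructed from the call sites in this diff; the committed abstract class may differ.

package org.apache.lucene.index.codecs;

import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.MergeState;

public abstract class NormsWriter implements Closeable {
  public abstract void startField(FieldInfo info) throws IOException;   // begin norms for one field
  public abstract void writeNorm(byte norm) throws IOException;         // one norm byte per (live) doc
  public abstract void finish(int numDocs) throws IOException;          // called with the segment's doc count
  public abstract int merge(MergeState mergeState) throws IOException;  // returns number of docs merged
}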
@ -25,7 +25,7 @@ import org.apache.lucene.util.ArrayUtil;
|
|||
* just look at the length for the field (docState.length)
|
||||
* and record the norm. */
|
||||
|
||||
final class NormsWriterPerField extends InvertedDocEndConsumerPerField implements Comparable<NormsWriterPerField> {
|
||||
final class NormsConsumerPerField extends InvertedDocEndConsumerPerField implements Comparable<NormsConsumerPerField> {
|
||||
|
||||
final FieldInfo fieldInfo;
|
||||
final DocumentsWriterPerThread.DocState docState;
|
||||
|
@ -45,7 +45,7 @@ final class NormsWriterPerField extends InvertedDocEndConsumerPerField implement
|
|||
upto = 0;
|
||||
}
|
||||
|
||||
public NormsWriterPerField(final DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
|
||||
public NormsConsumerPerField(final DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
|
||||
this.fieldInfo = fieldInfo;
|
||||
docState = docInverterPerField.docState;
|
||||
fieldState = docInverterPerField.fieldState;
|
||||
|
@ -57,7 +57,7 @@ final class NormsWriterPerField extends InvertedDocEndConsumerPerField implement
|
|||
upto = 0;
|
||||
}
|
||||
|
||||
public int compareTo(NormsWriterPerField other) {
|
||||
public int compareTo(NormsConsumerPerField other) {
|
||||
return fieldInfo.name.compareTo(other.fieldInfo.name);
|
||||
}
|
||||
|
|
@ -332,24 +332,6 @@ public class ParallelReader extends IndexReader {
|
|||
return hasDeletions;
|
||||
}
|
||||
|
||||
// delete in all readers
|
||||
@Override
|
||||
protected void doDelete(int n) throws CorruptIndexException, IOException {
|
||||
for (final IndexReader reader : readers) {
|
||||
reader.deleteDocument(n);
|
||||
}
|
||||
hasDeletions = true;
|
||||
}
|
||||
|
||||
// undeleteAll in all readers
|
||||
@Override
|
||||
protected void doUndeleteAll() throws CorruptIndexException, IOException {
|
||||
for (final IndexReader reader : readers) {
|
||||
reader.undeleteAll();
|
||||
}
|
||||
hasDeletions = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
|
@ -402,25 +384,6 @@ public class ParallelReader extends IndexReader {
|
|||
return bytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int n, String field, byte value)
|
||||
throws CorruptIndexException, IOException {
|
||||
IndexReader reader = fieldToReader.get(field);
|
||||
if (reader!=null) {
|
||||
synchronized(normsCache) {
|
||||
normsCache.remove(field);
|
||||
}
|
||||
reader.doSetNorm(n, field, value);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(Term term) throws IOException {
|
||||
ensureOpen();
|
||||
IndexReader reader = fieldToReader.get(term.field());
|
||||
return reader==null ? 0 : reader.docFreq(term);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq(String field, BytesRef term) throws IOException {
|
||||
ensureOpen();
|
||||
|
@ -457,12 +420,6 @@ public class ParallelReader extends IndexReader {
|
|||
return readers.toArray(new IndexReader[readers.size()]);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doCommit(Map<String,String> commitUserData) throws IOException {
|
||||
for (final IndexReader reader : readers)
|
||||
reader.commit(commitUserData);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized void doClose() throws IOException {
|
||||
for (int i = 0; i < readers.size(); i++) {
|
||||
|
|
|
@ -62,7 +62,7 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy {
|
|||
* keeps a lock on the snapshots directory).
|
||||
*/
|
||||
public static Map<String, String> readSnapshotsInfo(Directory dir) throws IOException {
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
Map<String, String> snapshots = new HashMap<String, String>();
|
||||
try {
|
||||
int numDocs = r.numDocs();
|
||||
|
|
|
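Editor's note: a minimal usage sketch of the simplified, always read-only open call adopted above; "dir" is an already-opened Directory and is an assumption of the sketch.

IndexReader r = IndexReader.open(dir);
try {
  System.out.println("numDocs=" + r.numDocs()); // read-only access; no deleteDocument/setNorm on readers
} finally {
  r.close();
}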
@ -21,6 +21,7 @@ import java.io.IOException;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.NormsReader;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.FieldsProducer;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsReader;
|
||||
|
@ -48,6 +49,7 @@ final class SegmentCoreReaders {
|
|||
|
||||
final FieldsProducer fields;
|
||||
final PerDocValues perDocProducer;
|
||||
final NormsReader norms;
|
||||
|
||||
final Directory dir;
|
||||
final Directory cfsDir;
|
||||
|
@ -92,6 +94,10 @@ final class SegmentCoreReaders {
|
|||
// Ask codec for its Fields
|
||||
fields = format.fieldsProducer(segmentReadState);
|
||||
assert fields != null;
|
||||
// ask codec for its Norms:
|
||||
// TODO: since we don't write any norms file if there are no norms,
|
||||
// kinda janky to assume the codec handles the case of no norms file at all gracefully?!
|
||||
norms = codec.normsFormat().normsReader(cfsDir, si, fieldInfos, context, dir);
|
||||
perDocProducer = codec.docValuesFormat().docsProducer(segmentReadState);
|
||||
success = true;
|
||||
} finally {
|
||||
|
@ -126,7 +132,7 @@ final class SegmentCoreReaders {
|
|||
synchronized void decRef() throws IOException {
|
||||
if (ref.decrementAndGet() == 0) {
|
||||
IOUtils.close(fields, perDocProducer, termVectorsReaderOrig,
|
||||
fieldsReaderOrig, cfsReader, storeCFSReader);
|
||||
fieldsReaderOrig, cfsReader, storeCFSReader, norms);
|
||||
// Now, notify any ReaderFinished listeners:
|
||||
if (owner != null) {
|
||||
owner.notifyReaderFinishedListeners();
|
||||
|
|
|
@ -48,7 +48,7 @@ public final class SegmentInfo implements Cloneable {
|
|||
// TODO: remove these from this class, for now this is the representation
|
||||
public static final int NO = -1; // e.g. no norms; no deletes;
|
||||
public static final int YES = 1; // e.g. have norms; have deletes;
|
||||
static final int WITHOUT_GEN = 0; // a file name that has no GEN in it.
|
||||
public static final int WITHOUT_GEN = 0; // a file name that has no GEN in it.
|
||||
|
||||
public String name; // unique name in dir
|
||||
public int docCount; // number of docs in seg
|
||||
|
@ -337,23 +337,10 @@ public final class SegmentInfo implements Cloneable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns true if this field for this segment has saved a separate norms file (_<segment>_N.sX).
|
||||
*
|
||||
* @param fieldNumber the field index to check
|
||||
* @deprecated separate norms are not supported in >= 4.0
|
||||
*/
|
||||
public boolean hasSeparateNorms(int fieldNumber) {
|
||||
if (normGen == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long gen = normGen.get(fieldNumber);
|
||||
return gen != null && gen.longValue() != NO;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if any fields in this segment have separate norms.
|
||||
*/
|
||||
public boolean hasSeparateNorms() {
|
||||
@Deprecated
|
||||
boolean hasSeparateNorms() {
|
||||
if (normGen == null) {
|
||||
return false;
|
||||
} else {
|
||||
|
@ -367,42 +354,6 @@ public final class SegmentInfo implements Cloneable {
|
|||
return false;
|
||||
}
|
||||
|
||||
void initNormGen() {
|
||||
if (normGen == null) { // normGen is null if this segments file hasn't had any norms set against it yet
|
||||
normGen = new HashMap<Integer, Long>();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the generation count for the norms file for
|
||||
* this field.
|
||||
*
|
||||
* @param fieldIndex field whose norm file will be rewritten
|
||||
*/
|
||||
void advanceNormGen(int fieldIndex) {
|
||||
Long gen = normGen.get(fieldIndex);
|
||||
if (gen == null || gen.longValue() == NO) {
|
||||
normGen.put(fieldIndex, new Long(YES));
|
||||
} else {
|
||||
normGen.put(fieldIndex, gen+1);
|
||||
}
|
||||
clearFilesCache();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the file name for the norms file for this field.
|
||||
*
|
||||
* @param number field index
|
||||
*/
|
||||
public String getNormFileName(int number) {
|
||||
if (hasSeparateNorms(number)) {
|
||||
return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
|
||||
} else {
|
||||
// single file for all norms
|
||||
return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.NORMS_EXTENSION, WITHOUT_GEN);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark whether this segment is stored as a compound file.
|
||||
*
|
||||
|
@ -516,11 +467,6 @@ public final class SegmentInfo implements Cloneable {
|
|||
return codec;
|
||||
}
|
||||
|
||||
private void addIfExists(Set<String> files, String fileName) throws IOException {
|
||||
if (dir.fileExists(fileName))
|
||||
files.add(fileName);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return all files referenced by this SegmentInfo. The
|
||||
* returns List is a locally cached List so you should not
|
||||
|
@ -546,9 +492,6 @@ public final class SegmentInfo implements Cloneable {
|
|||
IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
|
||||
}
|
||||
} else {
|
||||
for(String ext : IndexFileNames.NON_STORE_INDEX_EXTENSIONS) {
|
||||
addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", ext));
|
||||
}
|
||||
codec.files(dir, this, fileSet);
|
||||
}
|
||||
|
||||
|
@ -566,15 +509,12 @@ public final class SegmentInfo implements Cloneable {
|
|||
if (delFileName != null && (delGen >= YES || dir.fileExists(delFileName))) {
|
||||
fileSet.add(delFileName);
|
||||
}
|
||||
|
||||
|
||||
// because separate norm files are unconditionally stored outside cfs,
|
||||
// we must explicitly ask for their filenames if we might have separate norms:
|
||||
// remove this when 3.x indexes are no longer supported
|
||||
if (normGen != null) {
|
||||
for (Entry<Integer,Long> entry : normGen.entrySet()) {
|
||||
long gen = entry.getValue();
|
||||
if (gen >= YES) {
|
||||
// Definitely a separate norm file, with generation:
|
||||
fileSet.add(IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + entry.getKey(), gen));
|
||||
}
|
||||
}
|
||||
codec.normsFormat().separateFiles(dir, this, fileSet);
|
||||
}
|
||||
|
||||
files = new ArrayList<String>(fileSet);
|
||||
|
|
|
@ -19,7 +19,6 @@ package org.apache.lucene.index;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -30,6 +29,7 @@ import org.apache.lucene.index.IndexReader.FieldOption;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.FieldInfosWriter;
|
||||
import org.apache.lucene.index.codecs.FieldsConsumer;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsWriter;
|
||||
import org.apache.lucene.index.codecs.PerDocConsumer;
|
||||
import org.apache.lucene.index.codecs.TermVectorsWriter;
|
||||
|
@ -38,7 +38,6 @@ import org.apache.lucene.index.values.TypePromoter;
|
|||
import org.apache.lucene.index.values.ValueType;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.InfoStream;
|
||||
|
@ -126,7 +125,11 @@ final class SegmentMerger {
|
|||
final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, segment, mergeState.fieldInfos, mergeState.mergedDocCount, termIndexInterval, codec, null, context);
|
||||
mergeTerms(segmentWriteState);
|
||||
mergePerDoc(segmentWriteState);
|
||||
mergeNorms();
|
||||
|
||||
if (mergeState.fieldInfos.hasNorms()) {
|
||||
int numMerged = mergeNorms(segmentWriteState);
|
||||
assert numMerged == mergeState.mergedDocCount;
|
||||
}
|
||||
|
||||
if (mergeState.fieldInfos.hasVectors()) {
|
||||
int numMerged = mergeVectors();
|
||||
|
@ -379,48 +382,19 @@ final class SegmentMerger {
|
|||
}
|
||||
}
|
||||
|
||||
private void mergeNorms() throws IOException {
|
||||
IndexOutput output = null;
|
||||
private int mergeNorms(SegmentWriteState segmentWriteState) throws IOException {
|
||||
final NormsWriter writer = codec.normsFormat().normsWriter(segmentWriteState);
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
for (FieldInfo fi : mergeState.fieldInfos) {
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
if (output == null) {
|
||||
output = directory.createOutput(IndexFileNames.segmentFileName(segment, "", IndexFileNames.NORMS_EXTENSION), context);
|
||||
output.writeBytes(SegmentNorms.NORMS_HEADER, SegmentNorms.NORMS_HEADER.length);
|
||||
}
|
||||
for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
|
||||
final int maxDoc = reader.reader.maxDoc();
|
||||
byte normBuffer[] = reader.reader.norms(fi.name);
|
||||
if (normBuffer == null) {
|
||||
// Can be null if this segment doesn't have
|
||||
// any docs with this field
|
||||
normBuffer = new byte[maxDoc];
|
||||
Arrays.fill(normBuffer, (byte)0);
|
||||
}
|
||||
if (reader.liveDocs == null) {
|
||||
// optimized case for segments without deleted docs
|
||||
output.writeBytes(normBuffer, maxDoc);
|
||||
} else {
|
||||
// this segment has deleted docs, so we have to
|
||||
// check for every doc if it is deleted or not
|
||||
final Bits liveDocs = reader.liveDocs;
|
||||
for (int k = 0; k < maxDoc; k++) {
|
||||
if (liveDocs.get(k)) {
|
||||
output.writeByte(normBuffer[k]);
|
||||
}
|
||||
}
|
||||
}
|
||||
mergeState.checkAbort.work(maxDoc);
|
||||
}
|
||||
}
|
||||
}
|
||||
int numMerged = writer.merge(mergeState);
|
||||
success = true;
|
||||
return numMerged;
|
||||
} finally {
|
||||
if (success) {
|
||||
IOUtils.close(output);
|
||||
IOUtils.close(writer);
|
||||
} else {
|
||||
IOUtils.closeWhileHandlingException(output);
|
||||
IOUtils.closeWhileHandlingException(writer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
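Editor's note: an assumption-laden sketch of what the writer.merge(mergeState) call above is expected to do, reconstructed from the inlined loop this patch removes. Written as a method of the NormsWriter sketch given earlier; the actual codec implementation may differ.

int merge(MergeState mergeState) throws IOException {
  int numMergedDocs = 0;
  for (FieldInfo fi : mergeState.fieldInfos) {
    if (fi.isIndexed && !fi.omitNorms) {
      startField(fi);
      int merged = 0;
      for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
        final int maxDoc = reader.reader.maxDoc();
        byte[] norms = reader.reader.norms(fi.name); // may be null if no docs in this segment had the field
        for (int doc = 0; doc < maxDoc; doc++) {
          if (reader.liveDocs == null || reader.liveDocs.get(doc)) {
            writeNorm(norms == null ? (byte) 0 : norms[doc]);
            merged++;
          }
        }
      }
      numMergedDocs = merged;
    }
  }
  return numMergedDocs;
}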
@ -1,245 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.lucene.store.FlushInfo;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
/**
|
||||
* Byte[] referencing is used because a new norm object needs
|
||||
* to be created for each clone, and the byte array is all
|
||||
* that is needed for sharing between cloned readers. The
|
||||
* current norm referencing is for sharing between readers
|
||||
* whereas the byte[] referencing is for copy on write which
|
||||
* is independent of reader references (i.e. incRef, decRef).
|
||||
*/
|
||||
|
||||
final class SegmentNorms implements Cloneable {
|
||||
|
||||
/** norms header placeholder */
|
||||
static final byte[] NORMS_HEADER = new byte[]{'N','R','M',-1};
|
||||
|
||||
int refCount = 1;
|
||||
|
||||
// If this instance is a clone, the originalNorm
|
||||
// references the Norm that has a real open IndexInput:
|
||||
private SegmentNorms origNorm;
|
||||
|
||||
private IndexInput in;
|
||||
private long normSeek;
|
||||
|
||||
// null until bytes is set
|
||||
private AtomicInteger bytesRef;
|
||||
private byte[] bytes;
|
||||
private int number;
|
||||
|
||||
boolean dirty;
|
||||
boolean rollbackDirty;
|
||||
|
||||
private final SegmentReader owner;
|
||||
|
||||
public SegmentNorms(IndexInput in, int number, long normSeek, SegmentReader owner) {
|
||||
this.in = in;
|
||||
this.number = number;
|
||||
this.normSeek = normSeek;
|
||||
this.owner = owner;
|
||||
}
|
||||
|
||||
public synchronized void incRef() {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
|
||||
refCount++;
|
||||
}
|
||||
|
||||
private void closeInput() throws IOException {
|
||||
if (in != null) {
|
||||
if (in != owner.singleNormStream) {
|
||||
// It's private to us -- just close it
|
||||
in.close();
|
||||
} else {
|
||||
// We are sharing this with others -- decRef and
|
||||
// maybe close the shared norm stream
|
||||
if (owner.singleNormRef.decrementAndGet() == 0) {
|
||||
owner.singleNormStream.close();
|
||||
owner.singleNormStream = null;
|
||||
}
|
||||
}
|
||||
|
||||
in = null;
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void decRef() throws IOException {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
|
||||
|
||||
if (--refCount == 0) {
|
||||
if (origNorm != null) {
|
||||
origNorm.decRef();
|
||||
origNorm = null;
|
||||
} else {
|
||||
closeInput();
|
||||
}
|
||||
|
||||
if (bytes != null) {
|
||||
assert bytesRef != null;
|
||||
bytesRef.decrementAndGet();
|
||||
bytes = null;
|
||||
bytesRef = null;
|
||||
} else {
|
||||
assert bytesRef == null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load & cache full bytes array. Returns bytes.
|
||||
public synchronized byte[] bytes() throws IOException {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
|
||||
if (bytes == null) { // value not yet read
|
||||
assert bytesRef == null;
|
||||
if (origNorm != null) {
|
||||
// Ask origNorm to load so that for a series of
|
||||
// reopened readers we share a single read-only
|
||||
// byte[]
|
||||
bytes = origNorm.bytes();
|
||||
bytesRef = origNorm.bytesRef;
|
||||
bytesRef.incrementAndGet();
|
||||
|
||||
// Once we've loaded the bytes we no longer need
|
||||
// origNorm:
|
||||
origNorm.decRef();
|
||||
origNorm = null;
|
||||
|
||||
} else {
|
||||
// We are the origNorm, so load the bytes for real
|
||||
// ourself:
|
||||
final int count = owner.maxDoc();
|
||||
bytes = new byte[count];
|
||||
|
||||
// Since we are orig, in must not be null
|
||||
assert in != null;
|
||||
|
||||
// Read from disk.
|
||||
synchronized(in) {
|
||||
in.seek(normSeek);
|
||||
in.readBytes(bytes, 0, count, false);
|
||||
}
|
||||
|
||||
bytesRef = new AtomicInteger(1);
|
||||
closeInput();
|
||||
}
|
||||
}
|
||||
|
||||
return bytes;
|
||||
}
|
||||
|
||||
// Only for testing
|
||||
AtomicInteger bytesRef() {
|
||||
return bytesRef;
|
||||
}
|
||||
|
||||
// Called if we intend to change a norm value. We make a
|
||||
// private copy of bytes if it's shared with others:
|
||||
public synchronized byte[] copyOnWrite() throws IOException {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
|
||||
bytes();
|
||||
assert bytes != null;
|
||||
assert bytesRef != null;
|
||||
if (bytesRef.get() > 1) {
|
||||
// I cannot be the origNorm for another norm
|
||||
// instance if I'm being changed. Ie, only the
|
||||
// "head Norm" can be changed:
|
||||
assert refCount == 1;
|
||||
final AtomicInteger oldRef = bytesRef;
|
||||
bytes = owner.cloneNormBytes(bytes);
|
||||
bytesRef = new AtomicInteger(1);
|
||||
oldRef.decrementAndGet();
|
||||
}
|
||||
dirty = true;
|
||||
return bytes;
|
||||
}
|
||||
|
||||
// Returns a copy of this Norm instance that shares
|
||||
// IndexInput & bytes with the original one
|
||||
@Override
|
||||
public synchronized Object clone() {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
|
||||
|
||||
SegmentNorms clone;
|
||||
try {
|
||||
clone = (SegmentNorms) super.clone();
|
||||
} catch (CloneNotSupportedException cnse) {
|
||||
// Cannot happen
|
||||
throw new RuntimeException("unexpected CloneNotSupportedException", cnse);
|
||||
}
|
||||
clone.refCount = 1;
|
||||
|
||||
if (bytes != null) {
|
||||
assert bytesRef != null;
|
||||
assert origNorm == null;
|
||||
|
||||
// Clone holds a reference to my bytes:
|
||||
clone.bytesRef.incrementAndGet();
|
||||
} else {
|
||||
assert bytesRef == null;
|
||||
if (origNorm == null) {
|
||||
// I become the origNorm for the clone:
|
||||
clone.origNorm = this;
|
||||
}
|
||||
clone.origNorm.incRef();
|
||||
}
|
||||
|
||||
// Only the origNorm will actually readBytes from in:
|
||||
clone.in = null;
|
||||
|
||||
return clone;
|
||||
}
|
||||
|
||||
// Flush all pending changes to the next generation
|
||||
// separate norms file.
|
||||
public void reWrite(SegmentInfo si) throws IOException {
|
||||
assert refCount > 0 && (origNorm == null || origNorm.refCount > 0): "refCount=" + refCount + " origNorm=" + origNorm;
|
||||
|
||||
// NOTE: norms are re-written in regular directory, not cfs
|
||||
si.advanceNormGen(this.number);
|
||||
final String normFileName = si.getNormFileName(this.number);
|
||||
IndexOutput out = owner.directory().createOutput(normFileName, new IOContext(new FlushInfo(si.docCount, 0)));
|
||||
boolean success = false;
|
||||
try {
|
||||
try {
|
||||
out.writeBytes(SegmentNorms.NORMS_HEADER, 0, SegmentNorms.NORMS_HEADER.length);
|
||||
out.writeBytes(bytes, owner.maxDoc());
|
||||
} finally {
|
||||
out.close();
|
||||
}
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
try {
|
||||
owner.directory().deleteFile(normFileName);
|
||||
} catch (Throwable t) {
|
||||
// suppress this so we keep throwing the
|
||||
// original exception
|
||||
}
|
||||
}
|
||||
}
|
||||
this.dirty = false;
|
||||
}
|
||||
}
|
|
@ -20,15 +20,12 @@ package org.apache.lucene.index;
|
|||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.index.FieldInfo.IndexOptions;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsReader;
|
||||
import org.apache.lucene.index.codecs.PerDocValues;
|
||||
|
@ -37,37 +34,30 @@ import org.apache.lucene.store.IOContext;
|
|||
import org.apache.lucene.util.BitVector;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.CloseableThreadLocal;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
/**
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class SegmentReader extends IndexReader implements Cloneable {
|
||||
protected boolean readOnly;
|
||||
public final class SegmentReader extends IndexReader implements Cloneable {
|
||||
private final boolean readOnly;
|
||||
|
||||
private SegmentInfo si;
|
||||
private final ReaderContext readerContext = new AtomicReaderContext(this);
|
||||
CloseableThreadLocal<StoredFieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
|
||||
CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
|
||||
final CloseableThreadLocal<StoredFieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
|
||||
final CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
|
||||
|
||||
volatile BitVector liveDocs;
|
||||
AtomicInteger liveDocsRef = null;
|
||||
boolean hasChanges = false;
|
||||
private boolean liveDocsDirty = false;
|
||||
private boolean normsDirty = false;
|
||||
|
||||
// TODO: we should move this tracking into SegmentInfo;
|
||||
// this way SegmentInfo.toString shows pending deletes
|
||||
// TODO: remove deletions from SR
|
||||
private int pendingDeleteCount;
|
||||
|
||||
private boolean rollbackHasChanges = false;
|
||||
private boolean rollbackDeletedDocsDirty = false;
|
||||
private boolean rollbackNormsDirty = false;
|
||||
private SegmentInfo rollbackSegmentInfo;
|
||||
private int rollbackPendingDeleteCount;
|
||||
|
||||
// optionally used for the .nrm file shared by multiple norms
|
||||
IndexInput singleNormStream;
|
||||
AtomicInteger singleNormRef;
|
||||
// end TODO
|
||||
|
||||
SegmentCoreReaders core;
|
||||
|
||||
|
@ -80,41 +70,39 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
return core.getFieldsReaderOrig().clone();
|
||||
}
|
||||
}
|
||||
|
||||
Map<String,SegmentNorms> norms = new HashMap<String,SegmentNorms>();
|
||||
|
||||
/**
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static SegmentReader get(boolean readOnly, SegmentInfo si, int termInfosIndexDivisor, IOContext context) throws CorruptIndexException, IOException {
|
||||
return get(readOnly, si.dir, si, true, termInfosIndexDivisor, context);
|
||||
public static SegmentReader get(SegmentInfo si, int termInfosIndexDivisor, IOContext context) throws CorruptIndexException, IOException {
|
||||
return get(true, si, true, termInfosIndexDivisor, context);
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
static SegmentReader getRW(SegmentInfo si, boolean doOpenStores, int termInfosIndexDivisor, IOContext context) throws CorruptIndexException, IOException {
|
||||
return get(false, si, doOpenStores, termInfosIndexDivisor, context);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws IOException if there is a low-level IO error
|
||||
*/
|
||||
public static SegmentReader get(boolean readOnly,
|
||||
Directory dir,
|
||||
private static SegmentReader get(boolean readOnly,
|
||||
SegmentInfo si,
|
||||
boolean doOpenStores,
|
||||
int termInfosIndexDivisor,
|
||||
IOContext context)
|
||||
throws CorruptIndexException, IOException {
|
||||
|
||||
SegmentReader instance = new SegmentReader();
|
||||
instance.readOnly = readOnly;
|
||||
instance.si = si;
|
||||
SegmentReader instance = new SegmentReader(readOnly, si);
|
||||
boolean success = false;
|
||||
|
||||
try {
|
||||
instance.core = new SegmentCoreReaders(instance, dir, si, context, termInfosIndexDivisor);
|
||||
instance.core = new SegmentCoreReaders(instance, si.dir, si, context, termInfosIndexDivisor);
|
||||
if (doOpenStores) {
|
||||
instance.core.openDocStores(si);
|
||||
}
|
||||
instance.loadLiveDocs(context);
|
||||
instance.openNorms(instance.core.cfsDir, context);
|
||||
success = true;
|
||||
} finally {
|
||||
|
||||
|
@ -130,6 +118,11 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
return instance;
|
||||
}
|
||||
|
||||
private SegmentReader(boolean readOnly, SegmentInfo si) {
|
||||
this.readOnly = readOnly;
|
||||
this.si = si;
|
||||
}
|
||||
|
||||
void openDocStores() throws IOException {
|
||||
core.openDocStores(si);
|
||||
}
|
||||
|
@ -158,7 +151,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
|
||||
private void loadLiveDocs(IOContext context) throws IOException {
|
||||
// NOTE: the bitvector is stored using the regular directory, not cfs
|
||||
if (hasDeletions(si)) {
|
||||
if (si.hasDeletions()) {
|
||||
liveDocs = new BitVector(directory(), si.getDelFileName(), new IOContext(context, true));
|
||||
liveDocsRef = new AtomicInteger(1);
|
||||
assert checkLiveCounts();
|
||||
|
@ -168,27 +161,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
} else
|
||||
assert si.getDelCount() == 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the norm bytes. May be overridden by subclasses. New and experimental.
|
||||
* @param bytes Byte array to clone
|
||||
* @return New BitVector
|
||||
*/
|
||||
protected byte[] cloneNormBytes(byte[] bytes) {
|
||||
byte[] cloneBytes = new byte[bytes.length];
|
||||
System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
|
||||
return cloneBytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
|
||||
* @param bv BitVector to clone
|
||||
* @return New BitVector
|
||||
*/
|
||||
protected BitVector cloneDeletedDocs(BitVector bv) {
|
||||
ensureOpen();
|
||||
return (BitVector)bv.clone();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final synchronized Object clone() {
|
||||
|
@ -199,175 +171,16 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
return reopenSegment(si, true, openReadOnly);
|
||||
// used by DirectoryReader:
|
||||
synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException {
|
||||
return reopenSegment(si, doClone, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized IndexReader doOpenIfChanged()
|
||||
throws CorruptIndexException, IOException {
|
||||
protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
|
||||
return reopenSegment(si, false, readOnly);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected synchronized IndexReader doOpenIfChanged(boolean openReadOnly)
|
||||
throws CorruptIndexException, IOException {
|
||||
return reopenSegment(si, false, openReadOnly);
|
||||
}
|
||||
|
||||
synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
|
||||
&& (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
|
||||
boolean normsUpToDate = true;
|
||||
|
||||
Set<Integer> fieldNormsChanged = new HashSet<Integer>();
|
||||
for (FieldInfo fi : core.fieldInfos) {
|
||||
int fieldNumber = fi.number;
|
||||
if (!this.si.getNormFileName(fieldNumber).equals(si.getNormFileName(fieldNumber))) {
|
||||
normsUpToDate = false;
|
||||
fieldNormsChanged.add(fieldNumber);
|
||||
}
|
||||
}
|
||||
|
||||
// if we're cloning we need to run through the reopenSegment logic
|
||||
// also if both old and new readers aren't readonly, we clone to avoid sharing modifications
|
||||
if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// When cloning, the incoming SegmentInfos should not
|
||||
// have any changes in it:
|
||||
assert !doClone || (normsUpToDate && deletionsUpToDate);
|
||||
|
||||
// clone reader
|
||||
SegmentReader clone = new SegmentReader();
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
core.incRef();
|
||||
clone.core = core;
|
||||
clone.readOnly = openReadOnly;
|
||||
clone.si = si;
|
||||
clone.pendingDeleteCount = pendingDeleteCount;
|
||||
clone.readerFinishedListeners = readerFinishedListeners;
|
||||
|
||||
if (!openReadOnly && hasChanges) {
|
||||
// My pending changes transfer to the new reader
|
||||
clone.liveDocsDirty = liveDocsDirty;
|
||||
clone.normsDirty = normsDirty;
|
||||
clone.hasChanges = hasChanges;
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
if (doClone) {
|
||||
if (liveDocs != null) {
|
||||
liveDocsRef.incrementAndGet();
|
||||
clone.liveDocs = liveDocs;
|
||||
clone.liveDocsRef = liveDocsRef;
|
||||
}
|
||||
} else {
|
||||
if (!deletionsUpToDate) {
|
||||
// load deleted docs
|
||||
assert clone.liveDocs == null;
|
||||
clone.loadLiveDocs(IOContext.READ);
|
||||
} else if (liveDocs != null) {
|
||||
liveDocsRef.incrementAndGet();
|
||||
clone.liveDocs = liveDocs;
|
||||
clone.liveDocsRef = liveDocsRef;
|
||||
}
|
||||
}
|
||||
|
||||
clone.norms = new HashMap<String,SegmentNorms>();
|
||||
|
||||
// Clone norms
|
||||
for (FieldInfo fi : core.fieldInfos) {
|
||||
// Clone unchanged norms to the cloned reader
|
||||
if (doClone || !fieldNormsChanged.contains(fi.number)) {
|
||||
final String curField = fi.name;
|
||||
SegmentNorms norm = this.norms.get(curField);
|
||||
if (norm != null)
|
||||
clone.norms.put(curField, (SegmentNorms) norm.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// If we are not cloning, then this will open anew
|
||||
// any norms that have changed:
|
||||
clone.openNorms(si.getUseCompoundFile() ? core.getCFSReader() : directory(), IOContext.DEFAULT);
|
||||
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
// An exception occurred during reopen, we have to decRef the norms
|
||||
// that we incRef'ed already and close singleNormsStream and FieldsReader
|
||||
clone.decRef();
|
||||
}
|
||||
}
|
||||
|
||||
return clone;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doCommit(Map<String,String> commitUserData) throws IOException {
|
||||
if (hasChanges) {
|
||||
startCommit();
|
||||
boolean success = false;
|
||||
try {
|
||||
commitChanges(commitUserData);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
rollbackCommit();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void commitChanges(Map<String,String> commitUserData) throws IOException {
|
||||
if (liveDocsDirty) { // re-write deleted
|
||||
si.advanceDelGen();
|
||||
|
||||
assert liveDocs.length() == si.docCount;
|
||||
|
||||
// We can write directly to the actual name (vs to a
|
||||
// .tmp & renaming it) because the file is not live
|
||||
// until segments file is written:
|
||||
final String delFileName = si.getDelFileName();
|
||||
boolean success = false;
|
||||
try {
|
||||
liveDocs.write(directory(), delFileName, IOContext.DEFAULT);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
try {
|
||||
directory().deleteFile(delFileName);
|
||||
} catch (Throwable t) {
|
||||
// suppress this so we keep throwing the
|
||||
// original exception
|
||||
}
|
||||
}
|
||||
}
|
||||
si.setDelCount(si.getDelCount()+pendingDeleteCount);
|
||||
pendingDeleteCount = 0;
|
||||
assert (maxDoc()-liveDocs.count()) == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + (maxDoc()-liveDocs.count());
|
||||
} else {
|
||||
assert pendingDeleteCount == 0;
|
||||
}
|
||||
|
||||
if (normsDirty) { // re-write norms
|
||||
si.initNormGen();
|
||||
for (final SegmentNorms norm : norms.values()) {
|
||||
if (norm.dirty) {
|
||||
norm.reWrite(si);
|
||||
}
|
||||
}
|
||||
}
|
||||
liveDocsDirty = false;
|
||||
normsDirty = false;
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
|
||||
/** @lucene.internal */
|
||||
public StoredFieldsReader getFieldsReader() {
|
||||
return fieldsReaderLocal.get();
|
||||
|
@ -375,6 +188,10 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
if (hasChanges) {
|
||||
doCommit();
|
||||
}
|
||||
|
||||
termVectorsLocal.close();
|
||||
fieldsReaderLocal.close();
|
||||
|
||||
|
@ -384,72 +201,17 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
liveDocs = null;
|
||||
}
|
||||
|
||||
for (final SegmentNorms norm : norms.values()) {
|
||||
norm.decRef();
|
||||
}
|
||||
if (core != null) {
|
||||
core.decRef();
|
||||
}
|
||||
}
|
||||
|
||||
static boolean hasDeletions(SegmentInfo si) throws IOException {
|
||||
// Don't call ensureOpen() here (it could affect performance)
|
||||
return si.hasDeletions();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasDeletions() {
|
||||
// Don't call ensureOpen() here (it could affect performance)
|
||||
return liveDocs != null;
|
||||
}
|
||||
|
||||
static boolean usesCompoundFile(SegmentInfo si) throws IOException {
|
||||
return si.getUseCompoundFile();
|
||||
}
|
||||
|
||||
static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
|
||||
return si.hasSeparateNorms();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doDelete(int docNum) {
|
||||
if (liveDocs == null) {
|
||||
liveDocs = new BitVector(maxDoc());
|
||||
liveDocs.setAll();
|
||||
liveDocsRef = new AtomicInteger(1);
|
||||
}
|
||||
// there is more than 1 SegmentReader with a reference to this
|
||||
// liveDocs BitVector so decRef the current liveDocsRef,
|
||||
// clone the BitVector, create a new liveDocsRef
|
||||
if (liveDocsRef.get() > 1) {
|
||||
AtomicInteger oldRef = liveDocsRef;
|
||||
liveDocs = cloneDeletedDocs(liveDocs);
|
||||
liveDocsRef = new AtomicInteger(1);
|
||||
oldRef.decrementAndGet();
|
||||
}
|
||||
liveDocsDirty = true;
|
||||
if (liveDocs.getAndClear(docNum)) {
|
||||
pendingDeleteCount++;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doUndeleteAll() {
|
||||
liveDocsDirty = false;
|
||||
if (liveDocs != null) {
|
||||
assert liveDocsRef != null;
|
||||
liveDocsRef.decrementAndGet();
|
||||
liveDocs = null;
|
||||
liveDocsRef = null;
|
||||
pendingDeleteCount = 0;
|
||||
si.clearDelGen();
|
||||
si.setDelCount(0);
|
||||
} else {
|
||||
assert liveDocsRef == null;
|
||||
assert pendingDeleteCount == 0;
|
||||
}
|
||||
}
|
||||
|
||||
List<String> files() throws IOException {
|
||||
return new ArrayList<String>(si.files());
|
||||
}
|
||||
|
@ -547,107 +309,14 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
@Override
|
||||
public boolean hasNorms(String field) {
|
||||
ensureOpen();
|
||||
return norms.containsKey(field);
|
||||
FieldInfo fi = core.fieldInfos.fieldInfo(field);
|
||||
return fi != null && fi.isIndexed && !fi.omitNorms;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] norms(String field) throws IOException {
|
||||
ensureOpen();
|
||||
final SegmentNorms norm = norms.get(field);
|
||||
if (norm == null) {
|
||||
// not indexed, or norms not stored
|
||||
return null;
|
||||
}
|
||||
return norm.bytes();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int doc, String field, byte value)
|
||||
throws IOException {
|
||||
SegmentNorms norm = norms.get(field);
|
||||
if (norm == null) {
|
||||
// field does not store norms
|
||||
throw new IllegalStateException("Cannot setNorm for field " + field + ": norms were omitted");
|
||||
}
|
||||
|
||||
normsDirty = true;
|
||||
norm.copyOnWrite()[doc] = value; // set the value
|
||||
}
|
||||
|
||||
private void openNorms(Directory cfsDir, IOContext context) throws IOException {
|
||||
boolean normsInitiallyEmpty = norms.isEmpty(); // only used for assert
|
||||
long nextNormSeek = SegmentNorms.NORMS_HEADER.length; //skip header (header unused for now)
|
||||
int maxDoc = maxDoc();
|
||||
for (FieldInfo fi : core.fieldInfos) {
|
||||
if (norms.containsKey(fi.name)) {
|
||||
// in case this SegmentReader is being re-opened, we might be able to
|
||||
// reuse some norm instances and skip loading them here
|
||||
continue;
|
||||
}
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
Directory d = directory();
|
||||
String fileName = si.getNormFileName(fi.number);
|
||||
if (!si.hasSeparateNorms(fi.number)) {
|
||||
d = cfsDir;
|
||||
}
|
||||
|
||||
// singleNormFile means multiple norms share this file
|
||||
boolean singleNormFile = IndexFileNames.matchesExtension(fileName, IndexFileNames.NORMS_EXTENSION);
|
||||
IndexInput normInput = null;
|
||||
long normSeek;
|
||||
|
||||
if (singleNormFile) {
|
||||
normSeek = nextNormSeek;
|
||||
if (singleNormStream == null) {
|
||||
singleNormStream = d.openInput(fileName, context);
|
||||
singleNormRef = new AtomicInteger(1);
|
||||
} else {
|
||||
singleNormRef.incrementAndGet();
|
||||
}
|
||||
// All norms in the .nrm file can share a single IndexInput since
|
||||
// they are only used in a synchronized context.
|
||||
// If this were to change in the future, a clone could be done here.
|
||||
normInput = singleNormStream;
|
||||
} else {
|
||||
normInput = d.openInput(fileName, context);
|
||||
// if the segment was created in 3.2 or after, we wrote the header for sure,
|
||||
// and don't need to do the sketchy file size check. otherwise, we check
|
||||
// if the size is exactly equal to maxDoc to detect a headerless file.
|
||||
// NOTE: remove this check in Lucene 5.0!
|
||||
String version = si.getVersion();
|
||||
final boolean isUnversioned =
|
||||
(version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
|
||||
&& normInput.length() == maxDoc();
|
||||
if (isUnversioned) {
|
||||
normSeek = 0;
|
||||
} else {
|
||||
normSeek = SegmentNorms.NORMS_HEADER.length;
|
||||
}
|
||||
}
|
||||
|
||||
norms.put(fi.name, new SegmentNorms(normInput, fi.number, normSeek, this));
|
||||
nextNormSeek += maxDoc; // increment also if some norms are separate
|
||||
}
|
||||
}
|
||||
assert singleNormStream == null || !normsInitiallyEmpty || nextNormSeek == singleNormStream.length();
|
||||
}
|
||||
|
||||
// for testing only
|
||||
boolean normsClosed() {
|
||||
if (singleNormStream != null) {
|
||||
return false;
|
||||
}
|
||||
for (final SegmentNorms norm : norms.values()) {
|
||||
if (norm.refCount > 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// for testing only
|
||||
boolean normsClosed(String field) {
|
||||
return norms.get(field).refCount == 0;
|
||||
return core.norms.norms(field);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -689,7 +358,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
return termVectorsReader.get(docID);
|
||||
}
|
||||
|
||||
/** {@inheritDoc} */
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder buffer = new StringBuilder();
|
||||
|
@ -724,28 +392,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
si = info;
|
||||
}
|
||||
|
||||
void startCommit() {
|
||||
rollbackSegmentInfo = (SegmentInfo) si.clone();
|
||||
rollbackHasChanges = hasChanges;
|
||||
rollbackDeletedDocsDirty = liveDocsDirty;
|
||||
rollbackNormsDirty = normsDirty;
|
||||
rollbackPendingDeleteCount = pendingDeleteCount;
|
||||
for (SegmentNorms norm : norms.values()) {
|
||||
norm.rollbackDirty = norm.dirty;
|
||||
}
|
||||
}
|
||||
|
||||
void rollbackCommit() {
|
||||
si.reset(rollbackSegmentInfo);
|
||||
hasChanges = rollbackHasChanges;
|
||||
liveDocsDirty = rollbackDeletedDocsDirty;
|
||||
normsDirty = rollbackNormsDirty;
|
||||
pendingDeleteCount = rollbackPendingDeleteCount;
|
||||
for (SegmentNorms norm : norms.values()) {
|
||||
norm.dirty = norm.rollbackDirty;
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns the directory this index resides in. */
|
||||
@Override
|
||||
public Directory directory() {
|
||||
|
@ -783,4 +429,178 @@ public class SegmentReader extends IndexReader implements Cloneable {
|
|||
ensureOpen();
|
||||
return core.perDocProducer;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
|
||||
* @param bv BitVector to clone
|
||||
* @return New BitVector
|
||||
*/
|
||||
// TODO: remove deletions from SR
|
||||
BitVector cloneDeletedDocs(BitVector bv) {
|
||||
ensureOpen();
|
||||
return (BitVector)bv.clone();
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
return reopenSegment(si, true, openReadOnly);
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
private synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
|
||||
&& (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
|
||||
|
||||
// if we're cloning we need to run through the reopenSegment logic
|
||||
// also if both old and new readers aren't readonly, we clone to avoid sharing modifications
|
||||
if (deletionsUpToDate && !doClone && openReadOnly && readOnly) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// When cloning, the incoming SegmentInfos should not
|
||||
// have any changes in it:
|
||||
assert !doClone || (deletionsUpToDate);
|
||||
|
||||
// clone reader
|
||||
SegmentReader clone = new SegmentReader(openReadOnly, si);
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
core.incRef();
|
||||
clone.core = core;
|
||||
clone.pendingDeleteCount = pendingDeleteCount;
|
||||
clone.readerFinishedListeners = readerFinishedListeners;
|
||||
|
||||
if (!openReadOnly && hasChanges) {
|
||||
// My pending changes transfer to the new reader
|
||||
clone.liveDocsDirty = liveDocsDirty;
|
||||
clone.hasChanges = hasChanges;
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
if (doClone) {
|
||||
if (liveDocs != null) {
|
||||
liveDocsRef.incrementAndGet();
|
||||
clone.liveDocs = liveDocs;
|
||||
clone.liveDocsRef = liveDocsRef;
|
||||
}
|
||||
} else {
|
||||
if (!deletionsUpToDate) {
|
||||
// load deleted docs
|
||||
assert clone.liveDocs == null;
|
||||
clone.loadLiveDocs(IOContext.READ);
|
||||
} else if (liveDocs != null) {
|
||||
liveDocsRef.incrementAndGet();
|
||||
clone.liveDocs = liveDocs;
|
||||
clone.liveDocsRef = liveDocsRef;
|
||||
}
|
||||
}
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
// An exception occurred during reopen, we have to decRef the norms
|
||||
// that we incRef'ed already and close singleNormsStream and FieldsReader
|
||||
clone.decRef();
|
||||
}
|
||||
}
|
||||
|
||||
return clone;
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
void doCommit() throws IOException {
|
||||
assert hasChanges;
|
||||
startCommit();
|
||||
boolean success = false;
|
||||
try {
|
||||
commitChanges();
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
rollbackCommit();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
private void startCommit() {
|
||||
rollbackSegmentInfo = (SegmentInfo) si.clone();
|
||||
rollbackHasChanges = hasChanges;
|
||||
rollbackDeletedDocsDirty = liveDocsDirty;
|
||||
rollbackPendingDeleteCount = pendingDeleteCount;
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
private void rollbackCommit() {
|
||||
si.reset(rollbackSegmentInfo);
|
||||
hasChanges = rollbackHasChanges;
|
||||
liveDocsDirty = rollbackDeletedDocsDirty;
|
||||
pendingDeleteCount = rollbackPendingDeleteCount;
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
private synchronized void commitChanges() throws IOException {
|
||||
if (liveDocsDirty) { // re-write deleted
|
||||
si.advanceDelGen();
|
||||
|
||||
assert liveDocs.length() == si.docCount;
|
||||
|
||||
// We can write directly to the actual name (vs to a
|
||||
// .tmp & renaming it) because the file is not live
|
||||
// until segments file is written:
|
||||
final String delFileName = si.getDelFileName();
|
||||
boolean success = false;
|
||||
try {
|
||||
liveDocs.write(directory(), delFileName, IOContext.DEFAULT);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
try {
|
||||
directory().deleteFile(delFileName);
|
||||
} catch (Throwable t) {
|
||||
// suppress this so we keep throwing the
|
||||
// original exception
|
||||
}
|
||||
}
|
||||
}
|
||||
si.setDelCount(si.getDelCount()+pendingDeleteCount);
|
||||
pendingDeleteCount = 0;
|
||||
assert (maxDoc()-liveDocs.count()) == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + (maxDoc()-liveDocs.count());
|
||||
} else {
|
||||
assert pendingDeleteCount == 0;
|
||||
}
|
||||
|
||||
liveDocsDirty = false;
|
||||
hasChanges = false;
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
synchronized void deleteDocument(int docNum) throws IOException {
|
||||
ensureOpen();
|
||||
hasChanges = true;
|
||||
doDelete(docNum);
|
||||
}
|
||||
|
||||
// TODO: remove deletions from SR
|
||||
void doDelete(int docNum) {
|
||||
if (liveDocs == null) {
|
||||
liveDocs = new BitVector(maxDoc());
|
||||
liveDocs.setAll();
|
||||
liveDocsRef = new AtomicInteger(1);
|
||||
}
|
||||
// there is more than 1 SegmentReader with a reference to this
|
||||
// liveDocs BitVector so decRef the current liveDocsRef,
|
||||
// clone the BitVector, create a new liveDocsRef
|
||||
if (liveDocsRef.get() > 1) {
|
||||
AtomicInteger oldRef = liveDocsRef;
|
||||
liveDocs = cloneDeletedDocs(liveDocs);
|
||||
liveDocsRef = new AtomicInteger(1);
|
||||
oldRef.decrementAndGet();
|
||||
}
|
||||
liveDocsDirty = true;
|
||||
if (liveDocs.getAndClear(docNum)) {
|
||||
pendingDeleteCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
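Editor's note: a hedged sketch of the codec-side NormsReader surface implied by "return core.norms.norms(field)" in SegmentReader above and by SegmentCoreReaders closing it through IOUtils.close(); the committed abstract class may differ.

package org.apache.lucene.index.codecs;

import java.io.Closeable;
import java.io.IOException;

public abstract class NormsReader implements Closeable {
  public abstract byte[] norms(String field) throws IOException; // full norms byte[] for the field, or null if none
}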
@ -109,13 +109,4 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
|
|||
ensureOpen();
|
||||
return readerContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNorm(int n, String field, byte value)
|
||||
throws CorruptIndexException, IOException {
|
||||
synchronized(normsCache) {
|
||||
normsCache.remove(field);
|
||||
}
|
||||
in.doSetNorm(n, field, value);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -166,16 +166,16 @@ public class BlockTreeTermsReader extends FieldsProducer {
|
|||
}
|
||||
|
||||
protected void readHeader(IndexInput input) throws IOException {
|
||||
CodecUtil.checkHeader(input, BlockTreeTermsWriter.CODEC_NAME,
|
||||
BlockTreeTermsWriter.VERSION_START,
|
||||
BlockTreeTermsWriter.VERSION_CURRENT);
|
||||
CodecUtil.checkHeader(input, BlockTreeTermsWriter.TERMS_CODEC_NAME,
|
||||
BlockTreeTermsWriter.TERMS_VERSION_START,
|
||||
BlockTreeTermsWriter.TERMS_VERSION_CURRENT);
|
||||
dirOffset = input.readLong();
|
||||
}
|
||||
|
||||
protected void readIndexHeader(IndexInput input) throws IOException {
|
||||
CodecUtil.checkHeader(input, BlockTreeTermsWriter.CODEC_NAME,
|
||||
BlockTreeTermsWriter.VERSION_START,
|
||||
BlockTreeTermsWriter.VERSION_CURRENT);
|
||||
CodecUtil.checkHeader(input, BlockTreeTermsWriter.TERMS_INDEX_CODEC_NAME,
|
||||
BlockTreeTermsWriter.TERMS_INDEX_VERSION_START,
|
||||
BlockTreeTermsWriter.TERMS_INDEX_VERSION_CURRENT);
|
||||
indexDirOffset = input.readLong();
|
||||
}
|
||||
|
||||
|
|
|
@ -99,18 +99,21 @@ public class BlockTreeTermsWriter extends FieldsConsumer {
|
|||
static final int OUTPUT_FLAG_IS_FLOOR = 0x1;
|
||||
static final int OUTPUT_FLAG_HAS_TERMS = 0x2;
|
||||
|
||||
final static String CODEC_NAME = "BLOCK_TREE_TERMS_DICT";
|
||||
|
||||
// Initial format
|
||||
public static final int VERSION_START = 0;
|
||||
|
||||
public static final int VERSION_CURRENT = VERSION_START;
|
||||
|
||||
/** Extension of terms file */
|
||||
static final String TERMS_EXTENSION = "tim";
|
||||
static final String TERMS_INDEX_EXTENSION = "tip";
|
||||
final static String TERMS_CODEC_NAME = "BLOCK_TREE_TERMS_DICT";
|
||||
// Initial format
|
||||
public static final int TERMS_VERSION_START = 0;
|
||||
public static final int TERMS_VERSION_CURRENT = TERMS_VERSION_START;
|
||||
|
||||
protected final IndexOutput out;
|
||||
/** Extension of terms index file */
|
||||
static final String TERMS_INDEX_EXTENSION = "tip";
|
||||
final static String TERMS_INDEX_CODEC_NAME = "BLOCK_TREE_TERMS_INDEX";
|
||||
// Initial format
|
||||
public static final int TERMS_INDEX_VERSION_START = 0;
|
||||
public static final int TERMS_INDEX_VERSION_CURRENT = TERMS_INDEX_VERSION_START;
|
||||
|
||||
private final IndexOutput out;
|
||||
private final IndexOutput indexOut;
|
||||
final int minItemsInBlock;
|
||||
final int maxItemsInBlock;
|
||||
|
@ -178,22 +181,22 @@ public class BlockTreeTermsWriter extends FieldsConsumer {
|
|||
}
|
||||
|
||||
protected void writeHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
|
||||
CodecUtil.writeHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT);
|
||||
out.writeLong(0); // leave space for end index pointer
|
||||
}
|
||||
|
||||
protected void writeIndexHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
|
||||
CodecUtil.writeHeader(out, TERMS_INDEX_CODEC_NAME, TERMS_INDEX_VERSION_CURRENT);
|
||||
out.writeLong(0); // leave space for end index pointer
|
||||
}
|
||||
|
||||
protected void writeTrailer(long dirStart) throws IOException {
|
||||
out.seek(CodecUtil.headerLength(CODEC_NAME));
|
||||
protected void writeTrailer(IndexOutput out, long dirStart) throws IOException {
|
||||
out.seek(CodecUtil.headerLength(TERMS_CODEC_NAME));
|
||||
out.writeLong(dirStart);
|
||||
}
|
||||
|
||||
protected void writeIndexTrailer(long dirStart) throws IOException {
|
||||
indexOut.seek(CodecUtil.headerLength(CODEC_NAME));
|
||||
protected void writeIndexTrailer(IndexOutput indexOut, long dirStart) throws IOException {
|
||||
indexOut.seek(CodecUtil.headerLength(TERMS_INDEX_CODEC_NAME));
|
||||
indexOut.writeLong(dirStart);
|
||||
}
|
||||
|
||||
|
@ -935,8 +938,8 @@ public class BlockTreeTermsWriter extends FieldsConsumer {
|
|||
indexOut.writeVLong(field.indexStartFP);
|
||||
}
|
||||
}
|
||||
writeTrailer(dirStart);
|
||||
writeIndexTrailer(indexDirStart);
|
||||
writeTrailer(out, dirStart);
|
||||
writeIndexTrailer(indexOut, indexDirStart);
|
||||
} catch (IOException ioe2) {
|
||||
ioe = ioe2;
|
||||
} finally {
|
||||
|
|
|
@ -51,6 +51,7 @@ public abstract class Codec implements NamedSPILoader.NamedSPI {
|
|||
// TODO: segmentInfosFormat should be allowed to declare additional files
|
||||
// if it wants, in addition to segments_N
|
||||
docValuesFormat().files(dir, info, files);
|
||||
normsFormat().files(dir, info, files);
|
||||
}
|
||||
|
||||
/** Encodes/decodes postings */
|
||||
|
@@ -71,6 +72,9 @@ public abstract class Codec implements NamedSPILoader.NamedSPI {
|
|||
/** Encodes/decodes segments file */
|
||||
public abstract SegmentInfosFormat segmentInfosFormat();
|
||||
|
||||
/** Encodes/decodes document normalization values */
|
||||
public abstract NormsFormat normsFormat();
|
||||
|
||||
/** looks up a codec by name */
|
||||
public static Codec forName(String name) {
|
||||
return loader.lookup(name);
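Illustrative usage sketch (not part of this change): resolving a registered codec by its SPI name and then asking it for the norms format accessor added in this patch. "SimpleText" is the name SimpleTextCodec registers further below; the local variables are hypothetical.

    // hedged sketch: forName() goes through the SPI loader shown above
    Codec codec = Codec.forName("SimpleText");
    NormsFormat norms = codec.normsFormat();   // accessor added in this change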
@@ -0,0 +1,44 @@
|
|||
package org.apache.lucene.index.codecs;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
||||
/**
|
||||
* Format for normalization factors.
|
||||
*/
|
||||
public abstract class NormsFormat {
|
||||
/** Note: separateNormsDir should not be used! */
|
||||
public abstract NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException;
|
||||
public abstract NormsWriter normsWriter(SegmentWriteState state) throws IOException;
|
||||
public abstract void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException;
|
||||
|
||||
/**
|
||||
* Note: this should not be overridden!
|
||||
* @deprecated
|
||||
*/
|
||||
@Deprecated
|
||||
public void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {};
|
||||
}
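For illustration only (not in this commit): a minimal NormsFormat sketch that simply delegates to the Lucene40 implementation, using just the three abstract methods declared above; the class name DelegatingNormsFormat is hypothetical, and separateFiles() keeps its deprecated default.

    import java.io.IOException;
    import java.util.Set;

    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.SegmentInfo;
    import org.apache.lucene.index.SegmentWriteState;
    import org.apache.lucene.index.codecs.NormsFormat;
    import org.apache.lucene.index.codecs.NormsReader;
    import org.apache.lucene.index.codecs.NormsWriter;
    import org.apache.lucene.index.codecs.lucene40.Lucene40NormsFormat;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;

    // hypothetical delegating norms format (sketch, not part of the patch)
    public class DelegatingNormsFormat extends NormsFormat {
      private final NormsFormat delegate = new Lucene40NormsFormat();

      @Override
      public NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields,
                                     IOContext context, Directory separateNormsDir) throws IOException {
        return delegate.normsReader(dir, info, fields, context, separateNormsDir);
      }

      @Override
      public NormsWriter normsWriter(SegmentWriteState state) throws IOException {
        return delegate.normsWriter(state);
      }

      @Override
      public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
        delegate.files(dir, info, files);
      }
    }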
@@ -1,3 +1,5 @@
|
|||
package org.apache.lucene.index.codecs;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@@ -15,22 +17,10 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.index;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* This exception is thrown when an {@link IndexReader}
|
||||
* tries to make changes to the index (via {@link
|
||||
* IndexReader#deleteDocument}, {@link
|
||||
* IndexReader#undeleteAll} or {@link IndexReader#setNorm})
|
||||
* but changes have already been committed to the index
|
||||
* since this reader was instantiated. When this happens
|
||||
* you must open a new reader on the current index to make
|
||||
* the changes.
|
||||
*/
|
||||
public class StaleReaderException extends IOException {
|
||||
public StaleReaderException(String message) {
|
||||
super(message);
|
||||
}
|
||||
// simple api just for now before switching to docvalues apis
|
||||
public abstract class NormsReader implements Closeable {
|
||||
public abstract byte[] norms(String name) throws IOException;
|
||||
}
|
|
@@ -0,0 +1,70 @@
|
|||
package org.apache.lucene.index.codecs;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with this
|
||||
* work for additional information regarding copyright ownership. The ASF
|
||||
* licenses this file to You under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations under
|
||||
* the License.
|
||||
*/
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.MergeState;
|
||||
import org.apache.lucene.util.Bits;
|
||||
|
||||
// simple api just for now before switching to docvalues apis
|
||||
public abstract class NormsWriter implements Closeable {
|
||||
|
||||
// TODO: I think IW should set info.normValueType from Similarity,
|
||||
// and then this method just returns DocValuesConsumer
|
||||
public abstract void startField(FieldInfo info) throws IOException;
|
||||
public abstract void writeNorm(byte norm) throws IOException;
|
||||
public abstract void finish(int numDocs) throws IOException;
|
||||
|
||||
public int merge(MergeState mergeState) throws IOException {
|
||||
int numMergedDocs = 0;
|
||||
for (FieldInfo fi : mergeState.fieldInfos) {
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
startField(fi);
|
||||
int numMergedDocsForField = 0;
|
||||
for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
|
||||
final int maxDoc = reader.reader.maxDoc();
|
||||
byte normBuffer[] = reader.reader.norms(fi.name);
|
||||
if (normBuffer == null) {
|
||||
// Can be null if this segment doesn't have
|
||||
// any docs with this field
|
||||
normBuffer = new byte[maxDoc];
|
||||
Arrays.fill(normBuffer, (byte)0);
|
||||
}
|
||||
// this segment has deleted docs, so we have to
|
||||
// check for every doc if it is deleted or not
|
||||
final Bits liveDocs = reader.liveDocs;
|
||||
for (int k = 0; k < maxDoc; k++) {
|
||||
if (liveDocs == null || liveDocs.get(k)) {
|
||||
writeNorm(normBuffer[k]);
|
||||
numMergedDocsForField++;
|
||||
}
|
||||
}
|
||||
mergeState.checkAbort.work(maxDoc);
|
||||
}
|
||||
assert numMergedDocs == 0 || numMergedDocs == numMergedDocsForField;
|
||||
numMergedDocs = numMergedDocsForField;
|
||||
}
|
||||
}
|
||||
finish(numMergedDocs);
|
||||
return numMergedDocs;
|
||||
}
|
||||
}
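A hedged usage sketch (not part of the patch) of the write protocol implied by merge() above: startField() once per normed field, writeNorm() once per surviving document, and a single finish() call with the total document count. The helper class and the normsByField map are assumptions made for the example.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.codecs.NormsWriter;

    // hypothetical helper, mirroring the loop structure of NormsWriter.merge() above
    class NormsWriteSketch {
      static void writeAll(NormsWriter writer, FieldInfos fieldInfos,
                           Map<String,byte[]> normsByField, int numDocs) throws IOException {
        for (FieldInfo fi : fieldInfos) {
          if (fi.isIndexed && !fi.omitNorms) {
            writer.startField(fi);                    // one call per field that has norms
            for (byte norm : normsByField.get(fi.name)) {
              writer.writeNorm(norm);                 // one byte per document
            }
          }
        }
        writer.finish(numDocs);                       // called once, with the total doc count
      }
    }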
@@ -20,6 +20,7 @@ package org.apache.lucene.index.codecs.appending;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.SegmentInfosFormat;
|
||||
|
@@ -27,6 +28,7 @@ import org.apache.lucene.index.codecs.TermVectorsFormat;
|
|||
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40NormsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
|
||||
|
||||
|
@@ -47,6 +49,7 @@ public class AppendingCodec extends Codec {
|
|||
private final FieldInfosFormat fieldInfos = new Lucene40FieldInfosFormat();
|
||||
private final TermVectorsFormat vectors = new Lucene40TermVectorsFormat();
|
||||
private final DocValuesFormat docValues = new Lucene40DocValuesFormat();
|
||||
private final NormsFormat norms = new Lucene40NormsFormat();
|
||||
|
||||
@Override
|
||||
public PostingsFormat postingsFormat() {
|
||||
|
@@ -77,4 +80,9 @@ public class AppendingCodec extends Codec {
|
|||
public FieldInfosFormat fieldInfosFormat() {
|
||||
return fieldInfos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsFormat normsFormat() {
|
||||
return norms;
|
||||
}
|
||||
}
@@ -23,19 +23,16 @@ import java.util.Set;
|
|||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.SegmentReadState;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.BlockTreeTermsReader;
|
||||
import org.apache.lucene.index.codecs.BlockTreeTermsWriter;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.FieldsConsumer;
|
||||
import org.apache.lucene.index.codecs.FieldsProducer;
|
||||
import org.apache.lucene.index.codecs.FixedGapTermsIndexReader;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsReader;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsWriter;
|
||||
import org.apache.lucene.index.codecs.PostingsReaderBase;
|
||||
import org.apache.lucene.index.codecs.PostingsWriterBase;
|
||||
import org.apache.lucene.index.codecs.BlockTermsReader;
|
||||
import org.apache.lucene.index.codecs.TermsIndexReaderBase;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
||||
/**
|
||||
* Appending postings impl
|
||||
|
@@ -48,72 +45,39 @@ class AppendingPostingsFormat extends PostingsFormat {
|
|||
}
|
||||
|
||||
@Override
|
||||
public FieldsConsumer fieldsConsumer(SegmentWriteState state)
|
||||
throws IOException {
|
||||
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
|
||||
PostingsWriterBase docsWriter = new Lucene40PostingsWriter(state);
|
||||
boolean success = false;
|
||||
AppendingTermsIndexWriter indexWriter = null;
|
||||
try {
|
||||
indexWriter = new AppendingTermsIndexWriter(state);
|
||||
FieldsConsumer ret = new AppendingTermsWriter(state, docsWriter, BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
|
||||
success = true;
|
||||
return ret;
|
||||
} finally {
|
||||
if (!success) {
|
||||
docsWriter.close();
|
||||
}
|
||||
}
|
||||
success = false;
|
||||
try {
|
||||
FieldsConsumer ret = new AppendingTermsDictWriter(indexWriter, state, docsWriter);
|
||||
success = true;
|
||||
return ret;
|
||||
} finally {
|
||||
if (!success) {
|
||||
try {
|
||||
docsWriter.close();
|
||||
} finally {
|
||||
indexWriter.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldsProducer fieldsProducer(SegmentReadState state)
|
||||
throws IOException {
|
||||
PostingsReaderBase docsReader = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix);
|
||||
TermsIndexReaderBase indexReader;
|
||||
|
||||
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
|
||||
PostingsReaderBase postings = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix);
|
||||
|
||||
boolean success = false;
|
||||
try {
|
||||
indexReader = new AppendingTermsIndexReader(state.dir,
|
||||
state.fieldInfos,
|
||||
state.segmentInfo.name,
|
||||
state.termsIndexDivisor,
|
||||
BytesRef.getUTF8SortedAsUnicodeComparator(),
|
||||
state.segmentSuffix, state.context);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
docsReader.close();
|
||||
}
|
||||
}
|
||||
success = false;
|
||||
try {
|
||||
FieldsProducer ret = new AppendingTermsDictReader(indexReader,
|
||||
state.dir, state.fieldInfos, state.segmentInfo.name,
|
||||
docsReader,
|
||||
state.context,
|
||||
Lucene40PostingsFormat.TERMS_CACHE_SIZE,
|
||||
state.segmentSuffix);
|
||||
FieldsProducer ret = new AppendingTermsReader(
|
||||
state.dir,
|
||||
state.fieldInfos,
|
||||
state.segmentInfo.name,
|
||||
postings,
|
||||
state.context,
|
||||
state.segmentSuffix,
|
||||
state.termsIndexDivisor);
|
||||
success = true;
|
||||
return ret;
|
||||
} finally {
|
||||
if (!success) {
|
||||
try {
|
||||
docsReader.close();
|
||||
} finally {
|
||||
indexReader.close();
|
||||
}
|
||||
postings.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -122,7 +86,6 @@ class AppendingPostingsFormat extends PostingsFormat {
|
|||
public void files(Directory dir, SegmentInfo segmentInfo, String segmentSuffix, Set<String> files)
|
||||
throws IOException {
|
||||
Lucene40PostingsReader.files(dir, segmentInfo, segmentSuffix, files);
|
||||
BlockTermsReader.files(dir, segmentInfo, segmentSuffix, files);
|
||||
FixedGapTermsIndexReader.files(dir, segmentInfo, segmentSuffix, files);
|
||||
BlockTreeTermsReader.files(dir, segmentInfo, segmentSuffix, files);
|
||||
}
|
||||
}
@@ -1,55 +0,0 @@
|
|||
package org.apache.lucene.index.codecs.appending;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.codecs.PostingsReaderBase;
|
||||
import org.apache.lucene.index.codecs.BlockTermsReader;
|
||||
import org.apache.lucene.index.codecs.BlockTermsWriter;
|
||||
import org.apache.lucene.index.codecs.TermsIndexReaderBase;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.CodecUtil;
|
||||
|
||||
public class AppendingTermsDictReader extends BlockTermsReader {
|
||||
|
||||
public AppendingTermsDictReader(TermsIndexReaderBase indexReader,
|
||||
Directory dir, FieldInfos fieldInfos, String segment,
|
||||
PostingsReaderBase postingsReader, IOContext context,
|
||||
int termsCacheSize, String segmentSuffix) throws IOException {
|
||||
super(indexReader, dir, fieldInfos, segment, postingsReader, context,
|
||||
termsCacheSize, segmentSuffix);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readHeader(IndexInput in) throws IOException {
|
||||
CodecUtil.checkHeader(in, AppendingTermsDictWriter.CODEC_NAME,
|
||||
BlockTermsWriter.VERSION_START, BlockTermsWriter.VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void seekDir(IndexInput in, long dirOffset) throws IOException {
|
||||
in.seek(in.length() - Long.SIZE / 8);
|
||||
long offset = in.readLong();
|
||||
in.seek(offset);
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,47 +0,0 @@
|
|||
package org.apache.lucene.index.codecs.appending;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.PostingsWriterBase;
|
||||
import org.apache.lucene.index.codecs.BlockTermsWriter;
|
||||
import org.apache.lucene.index.codecs.TermsIndexWriterBase;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.CodecUtil;
|
||||
|
||||
public class AppendingTermsDictWriter extends BlockTermsWriter {
|
||||
final static String CODEC_NAME = "APPENDING_TERMS_DICT";
|
||||
|
||||
public AppendingTermsDictWriter(TermsIndexWriterBase indexWriter,
|
||||
SegmentWriteState state, PostingsWriterBase postingsWriter)
|
||||
throws IOException {
|
||||
super(indexWriter, state, postingsWriter);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeTrailer(long dirStart) throws IOException {
|
||||
out.writeLong(dirStart);
|
||||
}
|
||||
}
|
|
@@ -1,45 +0,0 @@
|
|||
package org.apache.lucene.index.codecs.appending;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.FixedGapTermsIndexWriter;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.CodecUtil;
|
||||
|
||||
public class AppendingTermsIndexWriter extends FixedGapTermsIndexWriter {
|
||||
final static String CODEC_NAME = "APPENDING_TERMS_INDEX";
|
||||
final static int VERSION_START = 0;
|
||||
final static int VERSION_CURRENT = VERSION_START;
|
||||
|
||||
public AppendingTermsIndexWriter(SegmentWriteState state) throws IOException {
|
||||
super(state);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeTrailer(long dirStart) throws IOException {
|
||||
out.writeLong(dirStart);
|
||||
}
|
||||
}
|
|
@@ -1,6 +1,6 @@
|
|||
package org.apache.lucene.index.codecs.appending;
|
||||
|
||||
/*
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
|
@@ -18,30 +18,40 @@ package org.apache.lucene.index.codecs.appending;
|
|||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Comparator;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.codecs.FixedGapTermsIndexReader;
|
||||
import org.apache.lucene.index.codecs.BlockTreeTermsReader;
|
||||
import org.apache.lucene.index.codecs.PostingsReaderBase;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.CodecUtil;
|
||||
|
||||
public class AppendingTermsIndexReader extends FixedGapTermsIndexReader {
|
||||
/**
|
||||
* Reads append-only terms from {@link AppendingTermsWriter}
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class AppendingTermsReader extends BlockTreeTermsReader {
|
||||
|
||||
public AppendingTermsIndexReader(Directory dir, FieldInfos fieldInfos,
|
||||
String segment, int indexDivisor, Comparator<BytesRef> termComp, String segmentSuffix, IOContext context)
|
||||
throws IOException {
|
||||
super(dir, fieldInfos, segment, indexDivisor, termComp, segmentSuffix, context);
|
||||
public AppendingTermsReader(Directory dir, FieldInfos fieldInfos, String segment, PostingsReaderBase postingsReader,
|
||||
IOContext ioContext, String segmentSuffix, int indexDivisor) throws IOException {
|
||||
super(dir, fieldInfos, segment, postingsReader, ioContext, segmentSuffix, indexDivisor);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected void readHeader(IndexInput input) throws IOException {
|
||||
CodecUtil.checkHeader(input, AppendingTermsIndexWriter.CODEC_NAME,
|
||||
AppendingTermsIndexWriter.VERSION_START, AppendingTermsIndexWriter.VERSION_START);
|
||||
CodecUtil.checkHeader(input, AppendingTermsWriter.TERMS_CODEC_NAME,
|
||||
AppendingTermsWriter.TERMS_VERSION_START,
|
||||
AppendingTermsWriter.TERMS_VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readIndexHeader(IndexInput input) throws IOException {
|
||||
CodecUtil.checkHeader(input, AppendingTermsWriter.TERMS_INDEX_CODEC_NAME,
|
||||
AppendingTermsWriter.TERMS_INDEX_VERSION_START,
|
||||
AppendingTermsWriter.TERMS_INDEX_VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void seekDir(IndexInput input, long dirOffset) throws IOException {
|
||||
input.seek(input.length() - Long.SIZE / 8);
|
|
@@ -0,0 +1,64 @@
|
|||
package org.apache.lucene.index.codecs.appending;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.BlockTreeTermsWriter;
|
||||
import org.apache.lucene.index.codecs.PostingsWriterBase;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.CodecUtil;
|
||||
|
||||
/**
|
||||
* Append-only version of {@link BlockTreeTermsWriter}
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class AppendingTermsWriter extends BlockTreeTermsWriter {
|
||||
final static String TERMS_CODEC_NAME = "APPENDING_TERMS_DICT";
|
||||
final static int TERMS_VERSION_START = 0;
|
||||
final static int TERMS_VERSION_CURRENT = TERMS_VERSION_START;
|
||||
|
||||
final static String TERMS_INDEX_CODEC_NAME = "APPENDING_TERMS_INDEX";
|
||||
final static int TERMS_INDEX_VERSION_START = 0;
|
||||
final static int TERMS_INDEX_VERSION_CURRENT = TERMS_INDEX_VERSION_START;
|
||||
|
||||
public AppendingTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter, int minItemsInBlock, int maxItemsInBlock) throws IOException {
|
||||
super(state, postingsWriter, minItemsInBlock, maxItemsInBlock);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeIndexHeader(IndexOutput out) throws IOException {
|
||||
CodecUtil.writeHeader(out, TERMS_INDEX_CODEC_NAME, TERMS_INDEX_VERSION_CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeTrailer(IndexOutput out, long dirStart) throws IOException {
|
||||
out.writeLong(dirStart);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void writeIndexTrailer(IndexOutput indexOut, long dirStart) throws IOException {
|
||||
indexOut.writeLong(dirStart);
|
||||
}
|
||||
}
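Design note, summarizing the code above rather than adding to it: the base BlockTreeTermsWriter trailer seeks back into the file header to record dirStart, which an append-only Directory cannot support, so the appending writer instead records dirStart as the last long of the file. The reader side then recovers it without any up-front pointer, roughly as in this hedged sketch (the input variable is assumed):

    // reader-side sketch matching the appending seekDir convention in this patch
    input.seek(input.length() - Long.SIZE / 8);  // position on the trailing long
    long dirStart = input.readLong();            // directory offset written by writeTrailer
    input.seek(dirStart);                        // jump to the terms dictionary directory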
@@ -26,6 +26,7 @@ import org.apache.lucene.index.SegmentReadState;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.PerDocConsumer;
|
||||
import org.apache.lucene.index.codecs.PerDocValues;
|
||||
|
@@ -33,6 +34,7 @@ import org.apache.lucene.index.codecs.PostingsFormat;
|
|||
import org.apache.lucene.index.codecs.SegmentInfosFormat;
|
||||
import org.apache.lucene.index.codecs.TermVectorsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40NormsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
|
||||
|
@@ -62,6 +64,9 @@ public class Lucene3xCodec extends Codec {
|
|||
// this way IR.commit fails on delete/undelete/setNorm/etc ?
|
||||
private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();
|
||||
|
||||
// TODO: this should really be a different impl
|
||||
private final NormsFormat normsFormat = new Lucene40NormsFormat();
|
||||
|
||||
// 3.x doesn't support docvalues
|
||||
private final DocValuesFormat docValuesFormat = new DocValuesFormat() {
|
||||
@Override
|
||||
|
@@ -107,4 +112,9 @@ public class Lucene3xCodec extends Codec {
|
|||
public SegmentInfosFormat segmentInfosFormat() {
|
||||
return infosFormat;
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsFormat normsFormat() {
|
||||
return normsFormat;
|
||||
}
|
||||
}
@@ -20,6 +20,7 @@ package org.apache.lucene.index.codecs.lucene40;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.SegmentInfosFormat;
|
||||
|
@@ -40,6 +41,7 @@ public class Lucene40Codec extends Codec {
|
|||
private final FieldInfosFormat fieldInfosFormat = new Lucene40FieldInfosFormat();
|
||||
private final DocValuesFormat docValuesFormat = new Lucene40DocValuesFormat();
|
||||
private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();
|
||||
private final NormsFormat normsFormat = new Lucene40NormsFormat();
|
||||
private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
|
||||
@Override
|
||||
public PostingsFormat getPostingsFormatForField(String field) {
|
||||
|
@@ -81,6 +83,11 @@ public class Lucene40Codec extends Codec {
|
|||
return infosFormat;
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsFormat normsFormat() {
|
||||
return normsFormat;
|
||||
}
|
||||
|
||||
/** Returns the postings format that should be used for writing
|
||||
* new segments of <code>field</code>.
|
||||
*
@@ -0,0 +1,53 @@
|
|||
package org.apache.lucene.index.codecs.lucene40;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.NormsReader;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
||||
public class Lucene40NormsFormat extends NormsFormat {
|
||||
|
||||
@Override
|
||||
public NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
|
||||
return new Lucene40NormsReader(dir, info, fields, context, separateNormsDir);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsWriter normsWriter(SegmentWriteState state) throws IOException {
|
||||
return new Lucene40NormsWriter(state.directory, state.segmentName, state.context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
Lucene40NormsReader.files(dir, info, files);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
Lucene40NormsReader.separateFiles(dir, info, files);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,196 @@
|
|||
package org.apache.lucene.index.codecs.lucene40;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.codecs.NormsReader;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.MapBackedSet;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
public class Lucene40NormsReader extends NormsReader {
|
||||
// this would be replaced by Source/SourceCache in a dv impl.
|
||||
// for now we have our own mini-version
|
||||
final Map<String,Norm> norms = new HashMap<String,Norm>();
|
||||
// any .nrm or .sNN files we have open at any time.
|
||||
// TODO: just a list, and double-close() separate norms files?
|
||||
final Set<IndexInput> openFiles = new MapBackedSet<IndexInput>(new IdentityHashMap<IndexInput,Boolean>());
|
||||
// points to a singleNormFile
|
||||
IndexInput singleNormStream;
|
||||
final int maxdoc;
|
||||
|
||||
// note: just like segmentreader in 3.x, we open up all the files here (including separate norms) up front.
|
||||
// but we just don't do any seeks or reading yet.
|
||||
public Lucene40NormsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
|
||||
maxdoc = info.docCount;
|
||||
String segmentName = info.name;
|
||||
Map<Integer,Long> normGen = info.getNormGen();
|
||||
boolean success = false;
|
||||
try {
|
||||
long nextNormSeek = Lucene40NormsWriter.NORMS_HEADER.length; //skip header (header unused for now)
|
||||
for (FieldInfo fi : fields) {
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
String fileName = getNormFilename(segmentName, normGen, fi.number);
|
||||
Directory d = hasSeparateNorms(normGen, fi.number) ? separateNormsDir : dir;
|
||||
|
||||
// singleNormFile means multiple norms share this file
|
||||
boolean singleNormFile = IndexFileNames.matchesExtension(fileName, Lucene40NormsWriter.NORMS_EXTENSION);
|
||||
IndexInput normInput = null;
|
||||
long normSeek;
|
||||
|
||||
if (singleNormFile) {
|
||||
normSeek = nextNormSeek;
|
||||
if (singleNormStream == null) {
|
||||
singleNormStream = d.openInput(fileName, context);
|
||||
openFiles.add(singleNormStream);
|
||||
}
|
||||
// All norms in the .nrm file can share a single IndexInput since
|
||||
// they are only used in a synchronized context.
|
||||
// If this were to change in the future, a clone could be done here.
|
||||
normInput = singleNormStream;
|
||||
} else {
|
||||
normInput = d.openInput(fileName, context);
|
||||
openFiles.add(normInput);
|
||||
// if the segment was created in 3.2 or after, we wrote the header for sure,
|
||||
// and don't need to do the sketchy file size check. otherwise, we check
|
||||
// if the size is exactly equal to maxDoc to detect a headerless file.
|
||||
// NOTE: remove this check in Lucene 5.0!
|
||||
String version = info.getVersion();
|
||||
final boolean isUnversioned =
|
||||
(version == null || StringHelper.getVersionComparator().compare(version, "3.2") < 0)
|
||||
&& normInput.length() == maxdoc;
|
||||
if (isUnversioned) {
|
||||
normSeek = 0;
|
||||
} else {
|
||||
normSeek = Lucene40NormsWriter.NORMS_HEADER.length;
|
||||
}
|
||||
}
|
||||
|
||||
Norm norm = new Norm();
|
||||
norm.file = normInput;
|
||||
norm.offset = normSeek;
|
||||
norms.put(fi.name, norm);
|
||||
nextNormSeek += maxdoc; // increment also if some norms are separate
|
||||
}
|
||||
}
|
||||
// TODO: change to a real check? see LUCENE-3619
|
||||
assert singleNormStream == null || nextNormSeek == singleNormStream.length();
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
IOUtils.closeWhileHandlingException(openFiles);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] norms(String name) throws IOException {
|
||||
Norm norm = norms.get(name);
|
||||
return norm == null ? null : norm.bytes();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
IOUtils.close(openFiles);
|
||||
} finally {
|
||||
norms.clear();
|
||||
openFiles.clear();
|
||||
}
|
||||
}
|
||||
|
||||
private static String getNormFilename(String segmentName, Map<Integer,Long> normGen, int number) {
|
||||
if (hasSeparateNorms(normGen, number)) {
|
||||
return IndexFileNames.fileNameFromGeneration(segmentName, Lucene40NormsWriter.SEPARATE_NORMS_EXTENSION + number, normGen.get(number));
|
||||
} else {
|
||||
// single file for all norms
|
||||
return IndexFileNames.fileNameFromGeneration(segmentName, Lucene40NormsWriter.NORMS_EXTENSION, SegmentInfo.WITHOUT_GEN);
|
||||
}
|
||||
}
|
||||
|
||||
private static boolean hasSeparateNorms(Map<Integer,Long> normGen, int number) {
|
||||
if (normGen == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Long gen = normGen.get(number);
|
||||
return gen != null && gen.longValue() != SegmentInfo.NO;
|
||||
}
|
||||
|
||||
class Norm {
|
||||
IndexInput file;
|
||||
long offset;
|
||||
byte bytes[];
|
||||
|
||||
synchronized byte[] bytes() throws IOException {
|
||||
if (bytes == null) {
|
||||
bytes = new byte[maxdoc];
|
||||
// some norms share fds
|
||||
synchronized(file) {
|
||||
file.seek(offset);
|
||||
file.readBytes(bytes, 0, bytes.length, false);
|
||||
}
|
||||
// we are done with this file
|
||||
if (file != singleNormStream) {
|
||||
openFiles.remove(file);
|
||||
file.close();
|
||||
file = null;
|
||||
}
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
}
|
||||
|
||||
static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
// TODO: This is what SI always did... but we can do this cleaner?
|
||||
// like first FI that has norms but doesn't have separate norms?
|
||||
final String normsFileName = IndexFileNames.segmentFileName(info.name, "", Lucene40NormsWriter.NORMS_EXTENSION);
|
||||
if (dir.fileExists(normsFileName)) {
|
||||
files.add(normsFileName);
|
||||
}
|
||||
}
|
||||
|
||||
/** @deprecated */
|
||||
@Deprecated
|
||||
static void separateFiles(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
Map<Integer,Long> normGen = info.getNormGen();
|
||||
if (normGen != null) {
|
||||
for (Entry<Integer,Long> entry : normGen.entrySet()) {
|
||||
long gen = entry.getValue();
|
||||
if (gen >= SegmentInfo.YES) {
|
||||
// Definitely a separate norm file, with generation:
|
||||
files.add(IndexFileNames.fileNameFromGeneration(info.name, Lucene40NormsWriter.SEPARATE_NORMS_EXTENSION + entry.getKey(), gen));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
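A hedged usage sketch (not part of the patch): obtaining the norms for one field through the reader above. The dir, info, fields, context and separateNormsDir arguments are assumed to be available from the surrounding segment-opening code, and "title" is a hypothetical field name.

    NormsReader normsReader = new Lucene40NormsReader(dir, info, fields, context, separateNormsDir);
    try {
      byte[] titleNorms = normsReader.norms("title");  // null when the field has no norms
    } finally {
      normsReader.close();                             // closes any open .nrm / .sNN inputs
    }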
@@ -0,0 +1,130 @@
|
|||
package org.apache.lucene.index.codecs.lucene40;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.MergeState;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
||||
public class Lucene40NormsWriter extends NormsWriter {
|
||||
private IndexOutput out;
|
||||
private int normCount = 0;
|
||||
|
||||
/** norms header placeholder */
|
||||
static final byte[] NORMS_HEADER = new byte[]{'N','R','M',-1};
|
||||
|
||||
/** Extension of norms file */
|
||||
static final String NORMS_EXTENSION = "nrm";
|
||||
|
||||
/** Extension of separate norms file
|
||||
* @deprecated */
|
||||
@Deprecated
|
||||
static final String SEPARATE_NORMS_EXTENSION = "s";
|
||||
|
||||
public Lucene40NormsWriter(Directory directory, String segment, IOContext context) throws IOException {
|
||||
final String normsFileName = IndexFileNames.segmentFileName(segment, "", NORMS_EXTENSION);
|
||||
boolean success = false;
|
||||
try {
|
||||
out = directory.createOutput(normsFileName, context);
|
||||
out.writeBytes(NORMS_HEADER, 0, NORMS_HEADER.length);
|
||||
success = true;
|
||||
} finally {
|
||||
if (!success) {
|
||||
IOUtils.closeWhileHandlingException(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void startField(FieldInfo info) throws IOException {
|
||||
assert info.omitNorms == false;
|
||||
normCount++;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeNorm(byte norm) throws IOException {
|
||||
out.writeByte(norm);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finish(int numDocs) throws IOException {
|
||||
if (4+normCount*(long)numDocs != out.getFilePointer()) {
|
||||
throw new RuntimeException(".nrm file size mismatch: expected=" + (4+normCount*(long)numDocs) + " actual=" + out.getFilePointer());
|
||||
}
|
||||
}
|
||||
|
||||
/** we override merge and bulk-merge norms when there are no deletions */
|
||||
@Override
|
||||
public int merge(MergeState mergeState) throws IOException {
|
||||
int numMergedDocs = 0;
|
||||
for (FieldInfo fi : mergeState.fieldInfos) {
|
||||
if (fi.isIndexed && !fi.omitNorms) {
|
||||
startField(fi);
|
||||
int numMergedDocsForField = 0;
|
||||
for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
|
||||
final int maxDoc = reader.reader.maxDoc();
|
||||
byte normBuffer[] = reader.reader.norms(fi.name);
|
||||
if (normBuffer == null) {
|
||||
// Can be null if this segment doesn't have
|
||||
// any docs with this field
|
||||
normBuffer = new byte[maxDoc];
|
||||
Arrays.fill(normBuffer, (byte)0);
|
||||
}
|
||||
if (reader.liveDocs == null) {
|
||||
//optimized case for segments without deleted docs
|
||||
out.writeBytes(normBuffer, maxDoc);
|
||||
numMergedDocsForField += maxDoc;
|
||||
} else {
|
||||
// this segment has deleted docs, so we have to
|
||||
// check for every doc if it is deleted or not
|
||||
final Bits liveDocs = reader.liveDocs;
|
||||
for (int k = 0; k < maxDoc; k++) {
|
||||
if (liveDocs.get(k)) {
|
||||
numMergedDocsForField++;
|
||||
out.writeByte(normBuffer[k]);
|
||||
}
|
||||
}
|
||||
}
|
||||
mergeState.checkAbort.work(maxDoc);
|
||||
}
|
||||
assert numMergedDocs == 0 || numMergedDocs == numMergedDocsForField;
|
||||
numMergedDocs = numMergedDocsForField;
|
||||
}
|
||||
}
|
||||
finish(numMergedDocs);
|
||||
return numMergedDocs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
IOUtils.close(out);
|
||||
} finally {
|
||||
out = null;
|
||||
}
|
||||
}
|
||||
}
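Worked example, derived from the finish() check above rather than from any new behavior: the .nrm file is the 4-byte NRM header followed by one byte per document for every normed field, so a segment with 2 normed fields and 1,000 documents is expected to end at 4 + 2 * 1000 = 2004 bytes, which is exactly the file-pointer value finish() verifies before declaring the file complete.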
@@ -20,6 +20,7 @@ package org.apache.lucene.index.codecs.simpletext;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.SegmentInfosFormat;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsFormat;
|
||||
|
@@ -40,6 +41,8 @@ public final class SimpleTextCodec extends Codec {
|
|||
private final TermVectorsFormat vectorsFormat = new SimpleTextTermVectorsFormat();
|
||||
// TODO: need a plain-text impl
|
||||
private final DocValuesFormat docValues = new Lucene40DocValuesFormat();
|
||||
// TODO: need a plain-text impl (using the above)
|
||||
private final NormsFormat normsFormat = new SimpleTextNormsFormat();
|
||||
|
||||
public SimpleTextCodec() {
|
||||
super("SimpleText");
|
||||
|
@@ -74,4 +77,9 @@ public final class SimpleTextCodec extends Codec {
|
|||
public SegmentInfosFormat segmentInfosFormat() {
|
||||
return segmentInfos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsFormat normsFormat() {
|
||||
return normsFormat;
|
||||
}
|
||||
}
@@ -0,0 +1,54 @@
|
|||
package org.apache.lucene.index.codecs.simpletext;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.NormsReader;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
||||
/**
|
||||
* plain-text norms format
|
||||
* <p>
|
||||
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class SimpleTextNormsFormat extends NormsFormat {
|
||||
|
||||
@Override
|
||||
public NormsReader normsReader(Directory dir, SegmentInfo info, FieldInfos fields, IOContext context, Directory separateNormsDir) throws IOException {
|
||||
return new SimpleTextNormsReader(dir, info, fields, context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsWriter normsWriter(SegmentWriteState state) throws IOException {
|
||||
return new SimpleTextNormsWriter(state.directory, state.segmentName, state.context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
SimpleTextNormsReader.files(dir, info, files);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,106 @@
|
|||
package org.apache.lucene.index.codecs.simpletext;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.codecs.NormsReader;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
import static org.apache.lucene.index.codecs.simpletext.SimpleTextNormsWriter.*;
|
||||
|
||||
/**
|
||||
* Reads plain-text norms
|
||||
* <p>
|
||||
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class SimpleTextNormsReader extends NormsReader {
|
||||
private Map<String,byte[]> norms = new HashMap<String,byte[]>();
|
||||
|
||||
public SimpleTextNormsReader(Directory directory, SegmentInfo si, FieldInfos fields, IOContext context) throws IOException {
|
||||
if (fields.hasNorms()) {
|
||||
readNorms(directory.openInput(IndexFileNames.segmentFileName(si.name, "", NORMS_EXTENSION), context), si.docCount);
|
||||
}
|
||||
}
|
||||
|
||||
// we read in all the norms up front into a hashmap
|
||||
private void readNorms(IndexInput in, int maxDoc) throws IOException {
|
||||
BytesRef scratch = new BytesRef();
|
||||
boolean success = false;
|
||||
try {
|
||||
SimpleTextUtil.readLine(in, scratch);
|
||||
while (!scratch.equals(END)) {
|
||||
assert StringHelper.startsWith(scratch, FIELD);
|
||||
String fieldName = readString(FIELD.length, scratch);
|
||||
byte bytes[] = new byte[maxDoc];
|
||||
for (int i = 0; i < bytes.length; i++) {
|
||||
SimpleTextUtil.readLine(in, scratch);
|
||||
assert StringHelper.startsWith(scratch, DOC);
|
||||
SimpleTextUtil.readLine(in, scratch);
|
||||
assert StringHelper.startsWith(scratch, NORM);
|
||||
bytes[i] = scratch.bytes[scratch.offset + NORM.length];
|
||||
}
|
||||
norms.put(fieldName, bytes);
|
||||
SimpleTextUtil.readLine(in, scratch);
|
||||
assert StringHelper.startsWith(scratch, FIELD) || scratch.equals(END);
|
||||
}
|
||||
success = true;
|
||||
} finally {
|
||||
if (success) {
|
||||
IOUtils.close(in);
|
||||
} else {
|
||||
IOUtils.closeWhileHandlingException(in);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] norms(String name) throws IOException {
|
||||
return norms.get(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
norms = null;
|
||||
}
|
||||
|
||||
static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
|
||||
// TODO: This is what SI always did... but we can do this cleaner?
|
||||
// like first FI that has norms but doesn't have separate norms?
|
||||
final String normsFileName = IndexFileNames.segmentFileName(info.name, "", SimpleTextNormsWriter.NORMS_EXTENSION);
|
||||
if (dir.fileExists(normsFileName)) {
|
||||
files.add(normsFileName);
|
||||
}
|
||||
}
|
||||
|
||||
private String readString(int offset, BytesRef scratch) {
|
||||
return new String(scratch.bytes, scratch.offset+offset, scratch.length-offset, IOUtils.CHARSET_UTF_8);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,114 @@
|
|||
package org.apache.lucene.index.codecs.simpletext;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.codecs.NormsWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
||||
/**
|
||||
* Writes plain-text norms
|
||||
* <p>
|
||||
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class SimpleTextNormsWriter extends NormsWriter {
|
||||
private IndexOutput out;
|
||||
private int docid = 0;
|
||||
|
||||
/** Extension of norms file */
|
||||
static final String NORMS_EXTENSION = "len";
|
||||
|
||||
private final BytesRef scratch = new BytesRef();
|
||||
|
||||
final static BytesRef END = new BytesRef("END");
|
||||
final static BytesRef FIELD = new BytesRef("field ");
|
||||
final static BytesRef DOC = new BytesRef(" doc ");
|
||||
final static BytesRef NORM = new BytesRef(" norm ");
|
||||
|
||||
public SimpleTextNormsWriter(Directory directory, String segment, IOContext context) throws IOException {
|
||||
final String normsFileName = IndexFileNames.segmentFileName(segment, "", NORMS_EXTENSION);
|
||||
out = directory.createOutput(normsFileName, context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void startField(FieldInfo info) throws IOException {
|
||||
assert info.omitNorms == false;
|
||||
docid = 0;
|
||||
write(FIELD);
|
||||
write(info.name);
|
||||
newLine();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeNorm(byte norm) throws IOException {
|
||||
write(DOC);
|
||||
write(Integer.toString(docid));
|
||||
newLine();
|
||||
|
||||
write(NORM);
|
||||
write(norm);
|
||||
newLine();
|
||||
docid++;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finish(int numDocs) throws IOException {
|
||||
if (docid != numDocs) {
|
||||
throw new RuntimeException("mergeNorms produced an invalid result: docCount is " + numDocs
|
||||
+ " but only saw " + docid + " file=" + out.toString() + "; now aborting this merge to prevent index corruption");
|
||||
}
|
||||
write(END);
|
||||
newLine();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
IOUtils.close(out);
|
||||
} finally {
|
||||
out = null;
|
||||
}
|
||||
}
|
||||
|
||||
private void write(String s) throws IOException {
|
||||
SimpleTextUtil.write(out, s, scratch);
|
||||
}
|
||||
|
||||
private void write(BytesRef bytes) throws IOException {
|
||||
SimpleTextUtil.write(out, bytes);
|
||||
}
|
||||
|
||||
private void write(byte b) throws IOException {
|
||||
scratch.grow(1);
|
||||
scratch.bytes[scratch.offset] = b;
|
||||
scratch.length = 1;
|
||||
SimpleTextUtil.write(out, scratch);
|
||||
}
|
||||
|
||||
private void newLine() throws IOException {
|
||||
SimpleTextUtil.writeNewline(out);
|
||||
}
|
||||
}
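For illustration (reconstructed from the writer above, not an excerpt from the patch): a segment with one normed field and two documents would produce a .len file shaped roughly as follows, where "title" is a hypothetical field name, the value after each norm marker is a single raw byte rather than printable text, and the exact leading whitespace comes from the DOC and NORM constants:

    field title
      doc 0
        norm <raw byte>
      doc 1
        norm <raw byte>
    END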
@@ -210,18 +210,18 @@ public abstract class IndexDocValues implements Closeable {
|
|||
|
||||
/**
|
||||
* Returns the internal array representation iff this {@link Source} uses an
|
||||
* array as its inner representation, otherwise <code>null</code>.
|
||||
* array as its inner representation, otherwise <code>UOE</code>.
|
||||
*/
|
||||
public Object getArray() {
|
||||
return null;
|
||||
throw new UnsupportedOperationException("getArray is not supported");
|
||||
}
|
||||
|
||||
/**
|
||||
* If this {@link Source} is sorted this method will return an instance of
|
||||
* {@link SortedSource} otherwise <code>null</code>
|
||||
* {@link SortedSource} otherwise <code>UOE</code>
|
||||
*/
|
||||
public SortedSource asSortedSource() {
|
||||
return null;
|
||||
throw new UnsupportedOperationException("asSortedSource is not supported");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -56,6 +56,22 @@ public class FieldValueFilter extends Filter {
|
|||
this.field = field;
|
||||
this.negate = negate;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the field this filter is applied on.
|
||||
* @return the field this filter is applied on.
|
||||
*/
|
||||
public String field() {
|
||||
return field;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff this filter is negated, otherwise <code>false</code>
|
||||
* @return <code>true</code> iff this filter is negated, otherwise <code>false</code>
|
||||
*/
|
||||
public boolean negate() {
|
||||
return negate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
|
||||
|
|
|
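
A brief sketch of the new accessors in use; the two-argument constructor is assumed from the field/negate assignments above, and the field name is made up:

  FieldValueFilter filter = new FieldValueFilter("price", false);
  String fieldName = filter.field();   // "price"
  boolean negated  = filter.negate();  // false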

@@ -128,7 +128,7 @@ public final class SearcherManager {
      ExecutorService es) throws IOException {
    this.es = es;
    this.warmer = warmer;
    currentSearcher = new IndexSearcher(IndexReader.open(dir, true), es);
    currentSearcher = new IndexSearcher(IndexReader.open(dir), es);
  }

  /**

@@ -26,7 +26,7 @@ import java.nio.channels.ClosedChannelException; // javadoc @link
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;

import java.util.Map;
import java.util.Set;
import java.util.WeakHashMap;

import java.security.AccessController;

@@ -34,6 +34,7 @@ import java.security.PrivilegedExceptionAction;
import java.security.PrivilegedActionException;
import java.lang.reflect.Method;

import org.apache.lucene.util.MapBackedSet;
import org.apache.lucene.util.Constants;

/** File-based {@link Directory} implementation that uses

@@ -259,7 +260,7 @@ public class MMapDirectory extends FSDirectory {
    private ByteBuffer curBuf; // redundant for speed: buffers[curBufIndex]

    private boolean isClone = false;
    private final Map<MMapIndexInput,Boolean> clones = new WeakHashMap<MMapIndexInput,Boolean>();
    private final Set<MMapIndexInput> clones = new MapBackedSet<MMapIndexInput>(new WeakHashMap<MMapIndexInput,Boolean>());

    MMapIndexInput(String resourceDescription, RandomAccessFile raf, long offset, long length, int chunkSizePower) throws IOException {
      super(resourceDescription);

@@ -430,7 +431,7 @@ public class MMapDirectory extends FSDirectory {

      // register the new clone in our clone list to clean it up on closing:
      synchronized(this.clones) {
        this.clones.put(clone, Boolean.TRUE);
        this.clones.add(clone);
      }

      return clone;

@@ -449,7 +450,7 @@ public class MMapDirectory extends FSDirectory {

      // for extra safety unset also all clones' buffers:
      synchronized(this.clones) {
        for (final MMapIndexInput clone : this.clones.keySet()) {
        for (final MMapIndexInput clone : this.clones) {
          assert clone.isClone;
          clone.unsetBuffers();
        }
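
The swap above keeps the weak-reference behavior of the original map: MapBackedSet only exposes the WeakHashMap through the Set interface, so clones that become unreachable still drop out of the registry without an explicit remove, and only the add()/iteration call sites change. A minimal sketch of the wrapper's contract, using the names from the hunk above:

  Set<MMapIndexInput> clones = new MapBackedSet<MMapIndexInput>(new WeakHashMap<MMapIndexInput,Boolean>());
  clones.add(input);                   // backed by a put(input, Boolean.TRUE) on the wrapped map
  for (MMapIndexInput c : clones) { }  // iterates the wrapped map's key set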

@@ -147,7 +147,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
    writer.addDocument(doc);
    writer.close();

    IndexReader reader = IndexReader.open(farsiIndex, true);
    IndexReader reader = IndexReader.open(farsiIndex);
    IndexSearcher search = newSearcher(reader);

    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi

@@ -385,7 +385,7 @@ public class RandomIndexWriter implements Closeable {
    w.commit();
    switchDoDocValues();
    if (r.nextBoolean()) {
      return IndexReader.open(w.getDirectory(), new KeepOnlyLastCommitDeletionPolicy(), r.nextBoolean(), _TestUtil.nextInt(r, 1, 10));
      return IndexReader.open(w.getDirectory(), _TestUtil.nextInt(r, 1, 10));
    } else {
      return w.getReader(applyDeletions);
    }
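
In the replacement call above, the deletion policy and readOnly flag are gone and only the terms index divisor remains. A plain, non-randomized equivalent, assuming the standard two-argument overload:

  IndexReader r = IndexReader.open(dir, 4); // second argument = termInfosIndexDivisor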

@@ -200,7 +200,7 @@ public class QueryUtils {
    Assert.assertEquals("writer has non-deleted docs",
        0, w.numDocs());
    w.close();
    IndexReader r = IndexReader.open(d, true);
    IndexReader r = IndexReader.open(d);
    Assert.assertEquals("reader has wrong number of deleted docs",
        numDeletedDocs, r.numDeletedDocs());
    return r;

@@ -70,13 +70,6 @@ public class TestBinaryDocument extends LuceneTestCase {
    assertTrue(stringFldStoredTest.equals(binaryValStored));

    writer.close();
    reader.close();

    reader = IndexReader.open(dir, false);
    /** delete the document from index */
    reader.deleteDocument(0);
    assertEquals(0, reader.numDocs());

    reader.close();
    dir.close();
  }
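
The block removed above relied on reader-side deletion; with read-only readers the equivalent flow deletes through the writer by a stored identifier. A sketch under that assumption (the "id" field and its value are made up, not the committed test code):

  // assuming each document was indexed with a unique stored "id" field:
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  w.deleteDocuments(new Term("id", "0"));
  w.close();

  IndexReader reader = IndexReader.open(dir);
  assertEquals(0, reader.numDocs());
  reader.close();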
@ -34,6 +34,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
|||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.NormsFormat;
|
||||
import org.apache.lucene.index.codecs.StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.codecs.SegmentInfosFormat;
|
||||
|
@ -41,6 +42,7 @@ import org.apache.lucene.index.codecs.TermVectorsFormat;
|
|||
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40DocValuesFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40NormsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
|
||||
import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
|
||||
|
@ -421,16 +423,20 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
// auxiliary directory
|
||||
Directory aux = newDirectory();
|
||||
|
||||
setUpDirs(dir, aux);
|
||||
setUpDirs(dir, aux, true);
|
||||
|
||||
IndexReader reader = IndexReader.open(aux, false);
|
||||
IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
|
||||
for (int i = 0; i < 20; i++) {
|
||||
reader.deleteDocument(i);
|
||||
writer.deleteDocuments(new Term("id", "" + i));
|
||||
}
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(aux);
|
||||
assertEquals(10, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
IndexWriter writer = newWriter(
|
||||
writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
|
@ -454,7 +460,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
Directory aux = newDirectory();
|
||||
Directory aux2 = newDirectory();
|
||||
|
||||
setUpDirs(dir, aux);
|
||||
setUpDirs(dir, aux, true);
|
||||
|
||||
IndexWriter writer = newWriter(
|
||||
aux2,
|
||||
|
@ -468,17 +474,25 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
assertEquals(3, writer.getSegmentCount());
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(aux, false);
|
||||
IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(aux, dontMergeConfig);
|
||||
for (int i = 0; i < 27; i++) {
|
||||
reader.deleteDocument(i);
|
||||
writer.deleteDocuments(new Term("id", "" + i));
|
||||
}
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(aux);
|
||||
assertEquals(3, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
reader = IndexReader.open(aux2, false);
|
||||
dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(aux2, dontMergeConfig);
|
||||
for (int i = 0; i < 8; i++) {
|
||||
reader.deleteDocument(i);
|
||||
writer.deleteDocuments(new Term("id", "" + i));
|
||||
}
|
||||
writer.close();
|
||||
reader = IndexReader.open(aux2);
|
||||
assertEquals(22, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -523,7 +537,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
}
|
||||
|
||||
private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(numDocs, reader.maxDoc());
|
||||
assertEquals(numDocs, reader.numDocs());
|
||||
reader.close();
|
||||
|
@ -531,7 +545,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
private void verifyTermDocs(Directory dir, Term term, int numDocs)
|
||||
throws IOException {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
DocsEnum docsEnum = _TestUtil.docs(random, reader, term.field, term.bytes, null, null, false);
|
||||
int count = 0;
|
||||
while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
|
||||
|
@ -541,11 +555,19 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
}
|
||||
|
||||
private void setUpDirs(Directory dir, Directory aux) throws IOException {
|
||||
setUpDirs(dir, aux, false);
|
||||
}
|
||||
|
||||
private void setUpDirs(Directory dir, Directory aux, boolean withID) throws IOException {
|
||||
IndexWriter writer = null;
|
||||
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
|
||||
// add 1000 documents in 1 segment
|
||||
addDocs(writer, 1000);
|
||||
if (withID) {
|
||||
addDocsWithID(writer, 1000, 0);
|
||||
} else {
|
||||
addDocs(writer, 1000);
|
||||
}
|
||||
assertEquals(1000, writer.maxDoc());
|
||||
assertEquals(1, writer.getSegmentCount());
|
||||
writer.close();
|
||||
|
@ -559,7 +581,11 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
);
|
||||
// add 30 documents in 3 segments
|
||||
for (int i = 0; i < 3; i++) {
|
||||
addDocs(writer, 10);
|
||||
if (withID) {
|
||||
addDocsWithID(writer, 10, 10*i);
|
||||
} else {
|
||||
addDocs(writer, 10);
|
||||
}
|
||||
writer.close();
|
||||
writer = newWriter(
|
||||
aux,
|
||||
|
@ -657,7 +683,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
readers = new IndexReader[NUM_COPY];
|
||||
for(int i=0;i<NUM_COPY;i++)
|
||||
readers[i] = IndexReader.open(dir, true);
|
||||
readers[i] = IndexReader.open(dir);
|
||||
}
|
||||
|
||||
void launchThreads(final int numIter) {
|
||||
|
@ -783,7 +809,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
assertTrue("found unexpected failures: " + c.failures, c.failures.isEmpty());
|
||||
|
||||
IndexReader reader = IndexReader.open(c.dir2, true);
|
||||
IndexReader reader = IndexReader.open(c.dir2);
|
||||
assertEquals(expectedNumDocs, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -970,11 +996,12 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
}
|
||||
|
||||
private void addDocs3(IndexWriter writer, int numDocs) throws IOException {
|
||||
// just like addDocs but with ID, starting from docStart
|
||||
private void addDocsWithID(IndexWriter writer, int numDocs, int docStart) throws IOException {
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
|
||||
doc.add(newField("id", "" + i, TextField.TYPE_STORED));
|
||||
doc.add(newField("id", "" + (docStart + i), TextField.TYPE_STORED));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
|
||||
|
@ -991,7 +1018,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodec(codec));
|
||||
// add 100 documents
|
||||
addDocs3(writer, 100);
|
||||
addDocsWithID(writer, 100, 0);
|
||||
assertEquals(100, writer.maxDoc());
|
||||
writer.commit();
|
||||
writer.close();
|
||||
|
@ -1122,6 +1149,11 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
public SegmentInfosFormat segmentInfosFormat() {
|
||||
return new Lucene40SegmentInfosFormat();
|
||||
}
|
||||
|
||||
@Override
|
||||
public NormsFormat normsFormat() {
|
||||
return new Lucene40NormsFormat();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -112,7 +112,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
public void doWork() throws Throwable {
|
||||
IndexReader r = IndexReader.open(directory, true);
|
||||
IndexReader r = IndexReader.open(directory);
|
||||
assertEquals(100, r.numDocs());
|
||||
r.close();
|
||||
}
|
||||
|
@ -144,7 +144,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
|
|||
}
|
||||
writer.commit();
|
||||
|
||||
IndexReader r = IndexReader.open(directory, true);
|
||||
IndexReader r = IndexReader.open(directory);
|
||||
assertEquals(100, r.numDocs());
|
||||
r.close();
|
||||
|
||||
|
|
|
@ -43,7 +43,6 @@ import org.apache.lucene.search.IndexSearcher;
|
|||
import org.apache.lucene.search.NumericRangeQuery;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.store.CompoundFileDirectory;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
@ -378,28 +377,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// make sure we can do delete & setNorm against this segment:
|
||||
reader = IndexReader.open(dir, false);
|
||||
searcher = newSearcher(reader);
|
||||
Term searchTerm = new Term("id", "6");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("wrong delete count", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
searcher.close();
|
||||
|
||||
// make sure they "took":
|
||||
reader = IndexReader.open(dir, true);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 43, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 43, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// fully merge
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
|
@ -408,10 +385,10 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 43, hits.length);
|
||||
assertEquals("wrong number of hits", 44, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
doTestHits(hits, 43, searcher.getIndexReader());
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 44, searcher.getIndexReader());
|
||||
assertEquals("wrong first document", "21", d.get("id"));
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
|
@ -432,26 +409,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// make sure we can do a delete & setNorm against this segment:
|
||||
reader = IndexReader.open(dir, false);
|
||||
Term searchTerm = new Term("id", "6");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("wrong delete count", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(22, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
|
||||
// make sure they "took":
|
||||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 33, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 33, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// fully merge
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
|
@ -460,10 +417,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 33, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 33, searcher.getIndexReader());
|
||||
assertEquals("wrong number of hits", 34, hits.length);
|
||||
doTestHits(hits, 34, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
|
@ -504,16 +459,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
addNoProxDoc(writer);
|
||||
writer.close();
|
||||
|
||||
// Delete one doc so we get a .del file:
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
writer = new IndexWriter(dir,
|
||||
conf.setMergePolicy(doCFS ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES)
|
||||
);
|
||||
Term searchTerm = new Term("id", "7");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
|
||||
// Set one norm so we get a .s0 file:
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
writer.deleteDocuments(searchTerm);
|
||||
writer.close();
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
@ -549,39 +500,18 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// Delete one doc so we get a .del file:
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
|
||||
);
|
||||
Term searchTerm = new Term("id", "7");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
|
||||
// Set one norm so we get a .s0 file:
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
|
||||
// The numbering of fields can vary depending on which
|
||||
// JRE is in use. On some JREs we see content bound to
|
||||
// field 0; on others, field 1. So, here we have to
|
||||
// figure out which field number corresponds to
|
||||
// "content", and then set our expected file names below
|
||||
// accordingly:
|
||||
CompoundFileDirectory cfsReader = new CompoundFileDirectory(dir, "_0.cfs", newIOContext(random), false);
|
||||
FieldInfosReader infosReader = Codec.getDefault().fieldInfosFormat().getFieldInfosReader();
|
||||
FieldInfos fieldInfos = infosReader.read(cfsReader, "_0", IOContext.READONCE);
|
||||
int contentFieldIndex = -1;
|
||||
for (FieldInfo fi : fieldInfos) {
|
||||
if (fi.name.equals("content")) {
|
||||
contentFieldIndex = fi.number;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cfsReader.close();
|
||||
assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
|
||||
writer.deleteDocuments(searchTerm);
|
||||
writer.close();
|
||||
|
||||
// Now verify file names:
|
||||
String[] expected = new String[] {"_0.cfs", "_0.cfe",
|
||||
"_0_1.del",
|
||||
"_0_1.s" + contentFieldIndex,
|
||||
"segments_2",
|
||||
"segments.gen"};
|
||||
|
||||
|
|
|
@ -36,20 +36,19 @@ public class TestCheckIndex extends LuceneTestCase {
|
|||
public void testDeletedDocs() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
|
||||
Document doc = new Document();
|
||||
FieldType customType = new FieldType(TextField.TYPE_STORED);
|
||||
customType.setStoreTermVectors(true);
|
||||
customType.setStoreTermVectorPositions(true);
|
||||
customType.setStoreTermVectorOffsets(true);
|
||||
doc.add(newField("field", "aaa", customType));
|
||||
for(int i=0;i<19;i++) {
|
||||
Document doc = new Document();
|
||||
FieldType customType = new FieldType(TextField.TYPE_STORED);
|
||||
customType.setStoreTermVectors(true);
|
||||
customType.setStoreTermVectorPositions(true);
|
||||
customType.setStoreTermVectorOffsets(true);
|
||||
doc.add(newField("field", "aaa"+i, customType));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
writer.forceMerge(1);
|
||||
writer.commit();
|
||||
writer.deleteDocuments(new Term("field","aaa5"));
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(5);
|
||||
reader.close();
|
||||
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
|
||||
CheckIndex checker = new CheckIndex(dir);
|
||||
|
@ -73,7 +72,7 @@ public class TestCheckIndex extends LuceneTestCase {
|
|||
|
||||
assertNotNull(seg.termIndexStatus);
|
||||
assertNull(seg.termIndexStatus.error);
|
||||
assertEquals(1, seg.termIndexStatus.termCount);
|
||||
assertEquals(19, seg.termIndexStatus.termCount);
|
||||
assertEquals(19, seg.termIndexStatus.totFreq);
|
||||
assertEquals(18, seg.termIndexStatus.totPos);
|
||||
|
||||
|
|
|
@ -113,7 +113,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
|
|||
}
|
||||
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
assertEquals(200+extraCount, reader.numDocs());
|
||||
reader.close();
|
||||
directory.close();
|
||||
|
@ -158,7 +158,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
|
|||
}
|
||||
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
// Verify that we did not lose any deletes...
|
||||
assertEquals(450, reader.numDocs());
|
||||
reader.close();
|
||||
|
@ -230,7 +230,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
|
|||
|
||||
writer.close(false);
|
||||
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
assertEquals((1+iter)*182, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ public class TestCrash extends LuceneTestCase {
|
|||
IndexWriter writer = initIndex(random, true);
|
||||
MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
|
||||
crash(writer);
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertTrue(reader.numDocs() < 157);
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -85,7 +85,7 @@ public class TestCrash extends LuceneTestCase {
|
|||
writer = initIndex(random, dir, false);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertTrue(reader.numDocs() < 314);
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -108,7 +108,7 @@ public class TestCrash extends LuceneTestCase {
|
|||
dir.fileLength(l[i]) + " bytes");
|
||||
*/
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertTrue(reader.numDocs() >= 157);
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -129,7 +129,7 @@ public class TestCrash extends LuceneTestCase {
|
|||
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
|
||||
*/
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(157, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -150,57 +150,9 @@ public class TestCrash extends LuceneTestCase {
|
|||
for(int i=0;i<l.length;i++)
|
||||
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
|
||||
*/
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(157, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testCrashReaderDeletes() throws IOException {
|
||||
|
||||
IndexWriter writer = initIndex(random, false);
|
||||
MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
|
||||
|
||||
writer.close(false);
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(3);
|
||||
|
||||
dir.crash();
|
||||
|
||||
/*
|
||||
String[] l = dir.list();
|
||||
Arrays.sort(l);
|
||||
for(int i=0;i<l.length;i++)
|
||||
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
|
||||
*/
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(157, reader.numDocs());
|
||||
reader.close();
|
||||
dir.clearCrash();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testCrashReaderDeletesAfterClose() throws IOException {
|
||||
|
||||
IndexWriter writer = initIndex(random, false);
|
||||
MockDirectoryWrapper dir = (MockDirectoryWrapper) writer.getDirectory();
|
||||
|
||||
writer.close(false);
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(3);
|
||||
reader.close();
|
||||
|
||||
dir.crash();
|
||||
|
||||
/*
|
||||
String[] l = dir.list();
|
||||
Arrays.sort(l);
|
||||
for(int i=0;i<l.length;i++)
|
||||
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
|
||||
*/
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(156, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,260 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestCrashCausesCorruptIndex extends LuceneTestCase {
|
||||
|
||||
File path;
|
||||
|
||||
/**
|
||||
* LUCENE-3627: This test fails.
|
||||
*
|
||||
* @throws Exception
|
||||
*/
|
||||
public void testCrashCorruptsIndexing() throws Exception {
|
||||
path = _TestUtil.getTempDir("testCrashCorruptsIndexing");
|
||||
|
||||
indexAndCrashOnCreateOutputSegments2();
|
||||
|
||||
searchForFleas(2);
|
||||
|
||||
indexAfterRestart();
|
||||
|
||||
searchForFleas(3);
|
||||
}
|
||||
|
||||
/**
|
||||
* index 1 document and commit.
|
||||
* prepare for crashing.
|
||||
* index 1 more document, and upon commit, creation of segments_2 will crash.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
private void indexAndCrashOnCreateOutputSegments2() throws IOException {
|
||||
Directory realDirectory = FSDirectory.open(path);
|
||||
CrashAfterCreateOutput crashAfterCreateOutput = new CrashAfterCreateOutput(realDirectory);
|
||||
|
||||
// NOTE: cannot use RandomIndexWriter because it
|
||||
// sometimes commits:
|
||||
IndexWriter indexWriter = new IndexWriter(crashAfterCreateOutput,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
|
||||
indexWriter.addDocument(getDocument());
|
||||
// writes segments_1:
|
||||
indexWriter.commit();
|
||||
|
||||
crashAfterCreateOutput.setCrashAfterCreateOutput("segments_2");
|
||||
indexWriter.addDocument(getDocument());
|
||||
try {
|
||||
// tries to write segments_2 but hits fake exc:
|
||||
indexWriter.commit();
|
||||
fail("should have hit CrashingException");
|
||||
} catch (CrashingException e) {
|
||||
// expected
|
||||
}
|
||||
// writes segments_3
|
||||
indexWriter.close();
|
||||
assertFalse(realDirectory.fileExists("segments_2"));
|
||||
crashAfterCreateOutput.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempts to index another 1 document.
|
||||
*
|
||||
* @throws IOException
|
||||
*/
|
||||
private void indexAfterRestart() throws IOException {
|
||||
Directory realDirectory = newFSDirectory(path);
|
||||
|
||||
// LUCENE-3627 (before the fix): this line fails because
|
||||
// it doesn't know what to do with the created but empty
|
||||
// segments_2 file
|
||||
IndexWriter indexWriter = new IndexWriter(realDirectory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
|
||||
// currently the test fails above.
|
||||
// however, to test the fix, the following lines should pass as well.
|
||||
indexWriter.addDocument(getDocument());
|
||||
indexWriter.close();
|
||||
assertFalse(realDirectory.fileExists("segments_2"));
|
||||
realDirectory.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Run an example search.
|
||||
*
|
||||
* @throws IOException
|
||||
* @throws ParseException
|
||||
*/
|
||||
private void searchForFleas(final int expectedTotalHits) throws IOException {
|
||||
Directory realDirectory = newFSDirectory(path);
|
||||
IndexReader indexReader = IndexReader.open(realDirectory);
|
||||
IndexSearcher indexSearcher = newSearcher(indexReader);
|
||||
TopDocs topDocs = indexSearcher.search(new TermQuery(new Term(TEXT_FIELD, "fleas")), 10);
|
||||
assertNotNull(topDocs);
|
||||
assertEquals(expectedTotalHits, topDocs.totalHits);
|
||||
indexSearcher.close();
|
||||
indexReader.close();
|
||||
realDirectory.close();
|
||||
}
|
||||
|
||||
private static final String TEXT_FIELD = "text";
|
||||
|
||||
/**
|
||||
* Gets a document with content "my dog has fleas".
|
||||
*/
|
||||
private Document getDocument() {
|
||||
Document document = new Document();
|
||||
document.add(newField(TEXT_FIELD, "my dog has fleas", TextField.TYPE_UNSTORED));
|
||||
return document;
|
||||
}
|
||||
|
||||
/**
|
||||
* The marker RuntimeException that we use in lieu of an
|
||||
* actual machine crash.
|
||||
*/
|
||||
private static class CrashingException extends RuntimeException {
|
||||
/**
|
||||
*
|
||||
*/
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
public CrashingException(String msg) {
|
||||
super(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This test class provides direct access to "simulating" a crash right after
|
||||
* realDirectory.createOutput(..) has been called on a certain specified name.
|
||||
*/
|
||||
private static class CrashAfterCreateOutput extends Directory {
|
||||
|
||||
private Directory realDirectory;
|
||||
private String crashAfterCreateOutput;
|
||||
|
||||
public CrashAfterCreateOutput(Directory realDirectory) throws IOException {
|
||||
this.realDirectory = realDirectory;
|
||||
setLockFactory(realDirectory.getLockFactory());
|
||||
}
|
||||
|
||||
public void setCrashAfterCreateOutput(String name) {
|
||||
this.crashAfterCreateOutput = name;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
realDirectory.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public IndexOutput createOutput(String name, IOContext cxt) throws IOException {
|
||||
IndexOutput indexOutput = realDirectory.createOutput(name, cxt);
|
||||
if (null != crashAfterCreateOutput && name.equals(crashAfterCreateOutput)) {
|
||||
// CRASH!
|
||||
indexOutput.close();
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now crash");
|
||||
new Throwable().printStackTrace(System.out);
|
||||
}
|
||||
throw new CrashingException("crashAfterCreateOutput "+crashAfterCreateOutput);
|
||||
}
|
||||
return indexOutput;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void deleteFile(String name) throws IOException {
|
||||
realDirectory.deleteFile(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public boolean fileExists(String name) throws IOException {
|
||||
return realDirectory.fileExists(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public long fileLength(String name) throws IOException {
|
||||
return realDirectory.fileLength(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public long fileModified(String name) throws IOException {
|
||||
return realDirectory.fileModified(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public String[] listAll() throws IOException {
|
||||
return realDirectory.listAll();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public IndexInput openInput(String name, IOContext cxt) throws IOException {
|
||||
return realDirectory.openInput(name, cxt);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
@Override
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
realDirectory.sync(names);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -25,6 +25,7 @@ import java.util.Collection;
|
|||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
|
@ -73,7 +74,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
}
|
||||
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
|
||||
IndexCommit lastCommit = commits.get(commits.size()-1);
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().length, r.getSequentialSubReaders().length, lastCommit.getSegmentCount());
|
||||
r.close();
|
||||
verifyCommitOrder(commits);
|
||||
|
@ -259,7 +260,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
|
||||
while(gen > 0) {
|
||||
try {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
reader.close();
|
||||
fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
|
||||
"",
|
||||
|
@ -351,7 +352,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
|
||||
// Make sure we can open a reader on each commit:
|
||||
for (final IndexCommit commit : commits) {
|
||||
IndexReader r = IndexReader.open(commit, null, false);
|
||||
IndexReader r = IndexReader.open(commit);
|
||||
r.close();
|
||||
}
|
||||
|
||||
|
@ -360,7 +361,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
|
||||
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
|
||||
while(gen > 0) {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
reader.close();
|
||||
dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
|
||||
gen--;
|
||||
|
@ -435,7 +436,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
// Should undo our rollback:
|
||||
writer.rollback();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
// Still merged, still 11 docs
|
||||
assertEquals(1, r.getSequentialSubReaders().length);
|
||||
assertEquals(11, r.numDocs());
|
||||
|
@ -450,7 +451,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
// Now 8 because we made another commit
|
||||
assertEquals(7, IndexReader.listCommits(dir).size());
|
||||
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
// Not fully merged because we rolled it back, and now only
|
||||
// 10 docs
|
||||
assertTrue(r.getSequentialSubReaders().length > 1);
|
||||
|
@ -462,7 +463,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
assertEquals(1, r.getSequentialSubReaders().length);
|
||||
assertEquals(10, r.numDocs());
|
||||
r.close();
|
||||
|
@ -474,7 +475,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
|
||||
// Reader still sees fully merged index, because writer
|
||||
// opened on the prior commit has not yet committed:
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
assertEquals(1, r.getSequentialSubReaders().length);
|
||||
assertEquals(10, r.numDocs());
|
||||
r.close();
|
||||
|
@ -482,7 +483,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// Now reader sees not-fully-merged index:
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
assertTrue(r.getSequentialSubReaders().length > 1);
|
||||
assertEquals(10, r.numDocs());
|
||||
r.close();
|
||||
|
@ -535,7 +536,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
|
||||
// Simplistic check: just verify the index is in fact
|
||||
// readable:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
reader.close();
|
||||
|
||||
dir.close();
|
||||
|
@ -583,7 +584,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
|
||||
for(int i=0;i<N+1;i++) {
|
||||
try {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
reader.close();
|
||||
if (i == N) {
|
||||
fail("should have failed on commits prior to last " + N);
|
||||
|
@ -603,140 +604,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Test a deletion policy that keeps last N commits
|
||||
* around, with reader doing deletes.
|
||||
*/
|
||||
public void testKeepLastNDeletionPolicyWithReader() throws IOException {
|
||||
final int N = 10;
|
||||
|
||||
for(int pass=0;pass<2;pass++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: pass=" + pass);
|
||||
}
|
||||
|
||||
boolean useCompoundFile = (pass % 2) != 0;
|
||||
|
||||
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
|
||||
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
|
||||
MergePolicy mp = conf.getMergePolicy();
|
||||
if (mp instanceof LogMergePolicy) {
|
||||
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
|
||||
}
|
||||
IndexWriter writer = new IndexWriter(dir, conf);
|
||||
writer.close();
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
Query query = new TermQuery(searchTerm);
|
||||
|
||||
for(int i=0;i<N+1;i++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("\nTEST: write i=" + i);
|
||||
}
|
||||
conf = newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
|
||||
mp = conf.getMergePolicy();
|
||||
if (mp instanceof LogMergePolicy) {
|
||||
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
|
||||
}
|
||||
writer = new IndexWriter(dir, conf);
|
||||
for(int j=0;j<17;j++) {
|
||||
addDoc(writer);
|
||||
}
|
||||
// this is a commit
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: close writer");
|
||||
}
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, policy, false);
|
||||
reader.deleteDocument(3*i+1);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(4*i+1, "content", sim.encodeNormValue(2.0F));
|
||||
IndexSearcher searcher = newSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(16*(1+i), hits.length);
|
||||
// this is a commit
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: close reader numOnCommit=" + policy.numOnCommit);
|
||||
}
|
||||
reader.close();
|
||||
searcher.close();
|
||||
}
|
||||
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
|
||||
mp = conf.getMergePolicy();
|
||||
if (mp instanceof LogMergePolicy) {
|
||||
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
|
||||
}
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
final boolean wasFullyMerged = r.getSequentialSubReaders().length == 1 && !r.hasDeletions();
|
||||
r.close();
|
||||
writer = new IndexWriter(dir, conf);
|
||||
writer.forceMerge(1);
|
||||
// this is a commit
|
||||
writer.close();
|
||||
|
||||
assertEquals(2*(N+1)+1, policy.numOnInit);
|
||||
assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit);
|
||||
|
||||
IndexReader rwReader = IndexReader.open(dir, false);
|
||||
IndexSearcher searcher = new IndexSearcher(rwReader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(176, hits.length);
|
||||
|
||||
// Simplistic check: just verify only the past N segments_N's still
|
||||
// exist, and, I can open a reader on each:
|
||||
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
|
||||
|
||||
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
|
||||
int expectedCount = 176;
|
||||
searcher.close();
|
||||
rwReader.close();
|
||||
for(int i=0;i<N+1;i++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: i=" + i);
|
||||
}
|
||||
try {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" got reader=" + reader);
|
||||
}
|
||||
|
||||
// Work backwards in commits on what the expected
|
||||
// count should be.
|
||||
searcher = newSearcher(reader);
|
||||
hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
if (i > 1) {
|
||||
if (i % 2 == 0) {
|
||||
expectedCount += 1;
|
||||
} else {
|
||||
expectedCount -= 17;
|
||||
}
|
||||
}
|
||||
assertEquals("maxDoc=" + searcher.getIndexReader().maxDoc() + " numDocs=" + searcher.getIndexReader().numDocs(), expectedCount, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
if (i == N) {
|
||||
fail("should have failed on commits before last 5");
|
||||
}
|
||||
} catch (IOException e) {
|
||||
if (i != N) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
if (i < N) {
|
||||
dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
|
||||
}
|
||||
gen--;
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Test a deletion policy that keeps last N commits
|
||||
* around, through creates.
|
||||
|
@ -777,18 +644,21 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
}
|
||||
writer = new IndexWriter(dir, conf);
|
||||
for(int j=0;j<17;j++) {
|
||||
addDoc(writer);
|
||||
addDocWithID(writer, i*(N+1)+j);
|
||||
}
|
||||
// this is a commit
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, policy, false);
|
||||
reader.deleteDocument(3);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(5, "content", sim.encodeNormValue(2.0F));
|
||||
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setIndexDeletionPolicy(policy)
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(dir, conf);
|
||||
writer.deleteDocuments(new Term("id", "" + (i*(N+1)+3)));
|
||||
// this is a commit
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = newSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(16, hits.length);
|
||||
// this is a commit
|
||||
reader.close();
|
||||
searcher.close();
|
||||
|
||||
|
@ -803,7 +673,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
assertEquals(3*(N+1), policy.numOnInit);
|
||||
assertEquals(3*(N+1)+1, policy.numOnCommit);
|
||||
|
||||
IndexReader rwReader = IndexReader.open(dir, false);
|
||||
IndexReader rwReader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(rwReader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(0, hits.length);
|
||||
|
@ -820,7 +690,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
|
||||
for(int i=0;i<N+1;i++) {
|
||||
try {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
|
||||
// Work backwards in commits on what the expected
|
||||
// count should be.
|
||||
|
@ -854,6 +724,13 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void addDocWithID(IndexWriter writer, int id) throws IOException {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
|
||||
doc.add(newField("id", "" + id, StringField.TYPE_UNSTORED));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
private void addDoc(IndexWriter writer) throws IOException
|
||||
{
|
||||
Document doc = new Document();
|
||||
|
|
|
@ -64,7 +64,7 @@ public class TestDirectoryReader extends LuceneTestCase {
|
|||
|
||||
protected IndexReader openReader() throws IOException {
|
||||
IndexReader reader;
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
assertTrue(reader instanceof DirectoryReader);
|
||||
|
||||
assertTrue(dir != null);
|
||||
|
@ -74,12 +74,7 @@ public class TestDirectoryReader extends LuceneTestCase {
|
|||
return reader;
|
||||
}
|
||||
|
||||
public void test() throws Exception {
|
||||
doTestDocument();
|
||||
doTestUndeleteAll();
|
||||
}
|
||||
|
||||
public void doTestDocument() throws IOException {
|
||||
public void testDocument() throws IOException {
|
||||
sis.read(dir);
|
||||
IndexReader reader = openReader();
|
||||
assertTrue(reader != null);
|
||||
|
@ -94,50 +89,13 @@ public class TestDirectoryReader extends LuceneTestCase {
|
|||
TestSegmentReader.checkNorms(reader);
|
||||
reader.close();
|
||||
}
|
||||
|
||||
public void doTestUndeleteAll() throws IOException {
|
||||
sis.read(dir);
|
||||
IndexReader reader = openReader();
|
||||
assertTrue(reader != null);
|
||||
assertEquals( 2, reader.numDocs() );
|
||||
reader.deleteDocument(0);
|
||||
assertEquals( 1, reader.numDocs() );
|
||||
reader.undeleteAll();
|
||||
assertEquals( 2, reader.numDocs() );
|
||||
|
||||
// Ensure undeleteAll survives commit/close/reopen:
|
||||
reader.commit();
|
||||
reader.close();
|
||||
|
||||
if (reader instanceof MultiReader)
|
||||
// MultiReader does not "own" the directory so it does
|
||||
// not write the changes to sis on commit:
|
||||
sis.commit(dir, sis.codecFormat());
|
||||
|
||||
sis.read(dir);
|
||||
reader = openReader();
|
||||
assertEquals( 2, reader.numDocs() );
|
||||
|
||||
reader.deleteDocument(0);
|
||||
assertEquals( 1, reader.numDocs() );
|
||||
reader.commit();
|
||||
reader.close();
|
||||
if (reader instanceof MultiReader)
|
||||
// MultiReader does not "own" the directory so it does
|
||||
// not write the changes to sis on commit:
|
||||
sis.commit(dir, sis.codecFormat());
|
||||
sis.read(dir);
|
||||
reader = openReader();
|
||||
assertEquals( 1, reader.numDocs() );
|
||||
reader.close();
|
||||
}
|
||||
|
||||
public void testIsCurrent() throws IOException {
|
||||
Directory ramDir1=newDirectory();
|
||||
addDoc(random, ramDir1, "test foo", true);
|
||||
Directory ramDir2=newDirectory();
|
||||
addDoc(random, ramDir2, "test blah", true);
|
||||
IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false)};
|
||||
IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2)};
|
||||
MultiReader mr = new MultiReader(readers);
|
||||
assertTrue(mr.isCurrent()); // just opened, must be current
|
||||
addDoc(random, ramDir1, "more text", false);
|
||||
|
@ -163,8 +121,8 @@ public class TestDirectoryReader extends LuceneTestCase {
|
|||
Directory ramDir3=newDirectory();
|
||||
addDoc(random, ramDir3, "test wow", true);
|
||||
|
||||
IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir3, false)};
|
||||
IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false), IndexReader.open(ramDir3, false)};
|
||||
IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir3)};
|
||||
IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2), IndexReader.open(ramDir3)};
|
||||
MultiReader mr2 = new MultiReader(readers1);
|
||||
MultiReader mr3 = new MultiReader(readers2);
|
||||
|
||||
|
|
|
@ -196,8 +196,8 @@ public class TestDoc extends LuceneTestCase {
|
|||
private SegmentInfo merge(Directory dir, SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
|
||||
throws Exception {
|
||||
IOContext context = newIOContext(random);
|
||||
SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
|
||||
SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
|
||||
SegmentReader r1 = SegmentReader.get(si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
|
||||
SegmentReader r2 = SegmentReader.get(si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
|
||||
|
||||
final Codec codec = Codec.getDefault();
|
||||
SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), si1.dir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, merged, MergeState.CheckAbort.NONE, null, new FieldInfos(new FieldInfos.FieldNumberBiMap()), codec, context);
|
||||
|
@ -224,7 +224,7 @@ public class TestDoc extends LuceneTestCase {
|
|||
|
||||
private void printSegment(PrintWriter out, SegmentInfo si)
|
||||
throws Exception {
|
||||
SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader = SegmentReader.get(si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
|
||||
for (int i = 0; i < reader.numDocs(); i++)
|
||||
out.println(reader.document(i));
|
||||
|
|
|
@ -64,7 +64,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
SegmentInfo info = writer.newestSegment();
|
||||
writer.close();
|
||||
//After adding the document, we should be able to read it back in
|
||||
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader = SegmentReader.get(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
assertTrue(reader != null);
|
||||
Document doc = reader.document(0);
|
||||
assertTrue(doc != null);
|
||||
|
@ -125,7 +125,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
writer.commit();
|
||||
SegmentInfo info = writer.newestSegment();
|
||||
writer.close();
|
||||
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader = SegmentReader.get(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
|
||||
DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
|
||||
"repeated", new BytesRef("repeated"));
|
||||
|
@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
writer.commit();
|
||||
SegmentInfo info = writer.newestSegment();
|
||||
writer.close();
|
||||
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader = SegmentReader.get(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
|
||||
DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
|
||||
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
|
||||
|
@ -241,7 +241,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
writer.commit();
|
||||
SegmentInfo info = writer.newestSegment();
|
||||
writer.close();
|
||||
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader = SegmentReader.get(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
|
||||
DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"));
|
||||
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
|
||||
|
@ -285,7 +285,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
// f1
|
||||
Terms tfv1 = reader.getTermVectors(0).terms("f1");
|
||||
assertNotNull(tfv1);
|
||||
|
@ -326,7 +326,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir, false));
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
// f1
|
||||
assertFalse("f1 should have no norms", reader.hasNorms("f1"));
|
||||
|
|
|
@ -206,7 +206,7 @@ public class TestFieldsReader extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
|
||||
FaultyIndexInput.doFail = true;
|
||||
|
||||
|
|
|
@ -28,6 +28,9 @@ import org.apache.lucene.util.BytesRef;
|
|||
import org.apache.lucene.util.Bits;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.HashSet;
|
||||
|
||||
public class TestFilterIndexReader extends LuceneTestCase {
|
||||
|
||||
|
@ -144,11 +147,11 @@ public class TestFilterIndexReader extends LuceneTestCase {
|
|||
|
||||
Directory target = newDirectory();
|
||||
writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
IndexReader reader = new TestReader(IndexReader.open(directory, true));
|
||||
IndexReader reader = new TestReader(IndexReader.open(directory));
|
||||
writer.addIndexes(reader);
|
||||
writer.close();
|
||||
reader.close();
|
||||
reader = IndexReader.open(target, true);
|
||||
reader = IndexReader.open(target);
|
||||
|
||||
TermsEnum terms = MultiFields.getTerms(reader, "default").iterator(null);
|
||||
while (terms.next() != null) {
|
||||
|
@@ -167,4 +170,28 @@ public class TestFilterIndexReader extends LuceneTestCase {
|
|||
directory.close();
|
||||
target.close();
|
||||
}
|
||||
|
||||
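// Uses reflection to check that FilterIndexReader overrides every public, non-static,
// non-final method it inherits, except for the few listed in methodsThatShouldNotBeOverridden.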
public void testOverrideMethods() throws Exception {
|
||||
HashSet<String> methodsThatShouldNotBeOverridden = new HashSet<String>();
|
||||
methodsThatShouldNotBeOverridden.add("doOpenIfChanged");
|
||||
methodsThatShouldNotBeOverridden.add("clone");
|
||||
boolean fail = false;
|
||||
for (Method m : FilterIndexReader.class.getMethods()) {
|
||||
int mods = m.getModifiers();
|
||||
if (Modifier.isStatic(mods) || Modifier.isFinal(mods)) {
|
||||
continue;
|
||||
}
|
||||
Class<?> declaringClass = m.getDeclaringClass();
|
||||
String name = m.getName();
|
||||
if (declaringClass != FilterIndexReader.class && declaringClass != Object.class && !methodsThatShouldNotBeOverridden.contains(name)) {
|
||||
System.err.println("method is not overridden by FilterIndexReader: " + name);
|
||||
fail = true;
|
||||
} else if (declaringClass == FilterIndexReader.class && methodsThatShouldNotBeOverridden.contains(name)) {
|
||||
System.err.println("method should not be overridden by FilterIndexReader: " + name);
|
||||
fail = true;
|
||||
}
|
||||
}
|
||||
assertFalse("FilterIndexReader overrides (or not) some problematic methods; see log above", fail);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -68,14 +68,14 @@ public class TestIndexFileDeleter extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// Delete one doc so we get a .del file:
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
|
||||
);
|
||||
Term searchTerm = new Term("id", "7");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
// Set one norm so we get a .s0 file:
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
writer.deleteDocuments(searchTerm);
|
||||
writer.close();
|
||||
|
||||
// Now, artificially create an extra .del file & extra
|
||||
// .s0 file:
|
||||
|
@@ -87,47 +87,6 @@ public class TestIndexFileDeleter extends LuceneTestCase {
|
|||
}
|
||||
*/
|
||||
|
||||
// The numbering of fields can vary depending on which
|
||||
// JRE is in use. On some JREs we see content bound to
|
||||
// field 0; on others, field 1. So, here we have to
|
||||
// figure out which field number corresponds to
|
||||
// "content", and then set our expected file names below
|
||||
// accordingly:
|
||||
CompoundFileDirectory cfsReader = new CompoundFileDirectory(dir, "_2.cfs", newIOContext(random), false);
|
||||
FieldInfosReader infosReader = Codec.getDefault().fieldInfosFormat().getFieldInfosReader();
|
||||
FieldInfos fieldInfos = infosReader.read(cfsReader, "2", IOContext.READONCE);
|
||||
int contentFieldIndex = -1;
|
||||
for (FieldInfo fi : fieldInfos) {
|
||||
if (fi.name.equals("content")) {
|
||||
contentFieldIndex = fi.number;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cfsReader.close();
|
||||
assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
|
||||
|
||||
String normSuffix = "s" + contentFieldIndex;
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that actually has a separate norms file
|
||||
// already:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_2_2." + normSuffix);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that actually has a separate norms file
|
||||
// already, using the "not compound file" extension:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_2_2.f" + contentFieldIndex);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that does not have a separate norms
|
||||
// file already:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_1_1." + normSuffix);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that does not have a separate norms
|
||||
// file already using the "not compound file" extension:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_1_1.f" + contentFieldIndex);
|
||||
|
||||
// Create a bogus separate del file for a
|
||||
// segment that already has a separate del file:
|
||||
copyFile(dir, "_0_1.del", "_0_2.del");
|
||||
|
|
|
@@ -52,64 +52,8 @@ import org.apache.lucene.util._TestUtil;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.Bits;
|
||||
|
||||
public class TestIndexReader extends LuceneTestCase
|
||||
{
|
||||
|
||||
public void testCommitUserData() throws Exception {
|
||||
Directory d = newDirectory();
|
||||
|
||||
Map<String,String> commitUserData = new HashMap<String,String>();
|
||||
commitUserData.put("foo", "fighters");
|
||||
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMaxBufferedDocs(2));
|
||||
for(int i=0;i<27;i++)
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
||||
IndexReader r = IndexReader.open(d, false);
|
||||
r.deleteDocument(5);
|
||||
r.flush(commitUserData);
|
||||
IndexCommit c = r.getIndexCommit();
|
||||
r.close();
|
||||
|
||||
SegmentInfos sis = new SegmentInfos();
|
||||
sis.read(d);
|
||||
IndexReader r2 = IndexReader.open(d, false);
|
||||
assertEquals(c.getUserData(), commitUserData);
|
||||
|
||||
assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
|
||||
|
||||
// Change the index
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random)).setOpenMode(
|
||||
OpenMode.APPEND).setMaxBufferedDocs(2));
|
||||
for(int i=0;i<7;i++)
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
||||
IndexReader r3 = IndexReader.openIfChanged(r2);
|
||||
assertNotNull(r3);
|
||||
assertFalse(c.equals(r3.getIndexCommit()));
|
||||
assertFalse(r2.getIndexCommit().getSegmentCount() == 1 && !r2.hasDeletions());
|
||||
r3.close();
|
||||
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
r3 = IndexReader.openIfChanged(r2);
|
||||
assertNotNull(r3);
|
||||
assertEquals(1, r3.getIndexCommit().getSegmentCount());
|
||||
r2.close();
|
||||
r3.close();
|
||||
d.close();
|
||||
}
|
||||
|
||||
public class TestIndexReader extends LuceneTestCase {
|
||||
|
||||
public void testIsCurrent() throws Exception {
|
||||
Directory d = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
|
@@ -117,7 +61,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
// set up reader:
|
||||
IndexReader reader = IndexReader.open(d, false);
|
||||
IndexReader reader = IndexReader.open(d);
|
||||
assertTrue(reader.isCurrent());
|
||||
// modify index by adding another document:
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
|
@@ -160,7 +104,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
writer.close();
|
||||
// set up reader
|
||||
IndexReader reader = IndexReader.open(d, false);
|
||||
IndexReader reader = IndexReader.open(d);
|
||||
Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
|
||||
assertTrue(fieldNames.contains("keyword"));
|
||||
assertTrue(fieldNames.contains("text"));
|
||||
|
@@ -220,7 +164,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
writer.close();
|
||||
// verify fields again
|
||||
reader = IndexReader.open(d, false);
|
||||
reader = IndexReader.open(d);
|
||||
fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
|
||||
assertEquals(13, fieldNames.size()); // the following fields
|
||||
assertTrue(fieldNames.contains("keyword"));
|
||||
|
@@ -355,7 +299,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
doc.add(new TextField("junk", "junk text"));
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
Document doc2 = reader.document(reader.maxDoc() - 1);
|
||||
IndexableField[] fields = doc2.getFields("bin1");
|
||||
assertNotNull(fields);
|
||||
|
@@ -374,7 +318,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
|
||||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
doc2 = reader.document(reader.maxDoc() - 1);
|
||||
fields = doc2.getFields("bin1");
|
||||
assertNotNull(fields);
|
||||
|
@@ -390,170 +334,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
// Make sure attempts to make changes after reader is
|
||||
// closed throw IOException:
|
||||
public void testChangesAfterClose() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 11 documents with term : aaa
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < 11; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
reader = IndexReader.open(dir, false);
|
||||
|
||||
// Close reader:
|
||||
reader.close();
|
||||
|
||||
// Then, try to make changes:
|
||||
try {
|
||||
reader.deleteDocument(4);
|
||||
fail("deleteDocument after close failed to throw IOException");
|
||||
} catch (AlreadyClosedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
|
||||
fail("setNorm after close failed to throw IOException");
|
||||
} catch (AlreadyClosedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
reader.undeleteAll();
|
||||
fail("undeleteAll after close failed to throw IOException");
|
||||
} catch (AlreadyClosedException e) {
|
||||
// expected
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// Make sure we get lock obtain failed exception with 2 writers:
|
||||
public void testLockObtainFailed() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 11 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.commit();
|
||||
for (int i = 0; i < 11; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
|
||||
// Create reader:
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// Try to make changes
|
||||
try {
|
||||
reader.deleteDocument(4);
|
||||
fail("deleteDocument should have hit LockObtainFailedException");
|
||||
} catch (LockObtainFailedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
|
||||
fail("setNorm should have hit LockObtainFailedException");
|
||||
} catch (LockObtainFailedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
reader.undeleteAll();
|
||||
fail("undeleteAll should have hit LockObtainFailedException");
|
||||
} catch (LockObtainFailedException e) {
|
||||
// expected
|
||||
}
|
||||
writer.close();
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// Make sure you can set norms & commit even if a reader
|
||||
// is open against the index:
|
||||
public void testWritingNorms() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 document with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
// now open reader & set norm for doc 0
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
|
||||
// we should be holding the write lock now:
|
||||
assertTrue("locked", IndexWriter.isLocked(dir));
|
||||
|
||||
reader.commit();
|
||||
|
||||
// we should not be holding the write lock now:
|
||||
assertTrue("not locked", !IndexWriter.isLocked(dir));
|
||||
|
||||
// open a 2nd reader:
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
|
||||
// set norm again for doc 0
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(3.0f));
|
||||
assertTrue("locked", IndexWriter.isLocked(dir));
|
||||
|
||||
reader.close();
|
||||
|
||||
// we should not be holding the write lock now:
|
||||
assertTrue("not locked", !IndexWriter.isLocked(dir));
|
||||
|
||||
reader2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
||||
// Make sure you can set norms & commit, and there are
|
||||
// no extra norms files left:
|
||||
public void testWritingNormsNoReader() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 document with term : aaa
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
// now open reader & set norm for doc 0 (writes to
|
||||
// _0_1.s0)
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
|
||||
// now open reader again & set norm for doc 0 (writes to _0_2.s0)
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
assertFalse("failed to remove first generation norms file on writing second generation",
|
||||
dir.fileExists("_0_1.s0"));
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/* ??? public void testOpenEmptyDirectory() throws IOException{
|
||||
String dirName = "test.empty";
|
||||
File fileDirName = new File(dirName);
|
||||
|
@@ -590,7 +370,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
// Now open existing directory and test that reader closes all files
|
||||
dir = newFSDirectory(dirFile);
|
||||
IndexReader reader1 = IndexReader.open(dir, false);
|
||||
IndexReader reader1 = IndexReader.open(dir);
|
||||
reader1.close();
|
||||
dir.close();
|
||||
|
||||
|
@@ -608,7 +388,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
|
||||
writer.close();
|
||||
assertTrue(IndexReader.indexExists(dir));
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertFalse(IndexWriter.isLocked(dir)); // reader only, no lock
|
||||
long version = IndexReader.lastModified(dir);
|
||||
if (i == 1) {
|
||||
|
@@ -623,7 +403,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
assertTrue("old lastModified is " + version + "; new lastModified is " + IndexReader.lastModified(dir), version <= IndexReader.lastModified(dir));
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@@ -638,7 +418,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
|
||||
writer.close();
|
||||
assertTrue(IndexReader.indexExists(dir));
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertFalse(IndexWriter.isLocked(dir)); // reader only, no lock
|
||||
long version = IndexReader.getCurrentVersion(dir);
|
||||
reader.close();
|
||||
|
@@ -647,114 +427,17 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
assertTrue("old version is " + version + "; new version is " + IndexReader.getCurrentVersion(dir), version < IndexReader.getCurrentVersion(dir));
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLock() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
try {
|
||||
reader.deleteDocument(0);
|
||||
fail("expected lock");
|
||||
} catch(IOException e) {
|
||||
// expected exception
|
||||
}
|
||||
try {
|
||||
IndexWriter.unlock(dir); // this should not be done in the real world!
|
||||
} catch (LockReleaseFailedException lrfe) {
|
||||
writer.close();
|
||||
}
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDocsOutOfOrderJIRA140() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for(int i=0;i<11;i++) {
|
||||
addDoc(writer, "aaa");
|
||||
}
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// Try to delete an invalid docId that is still within range
|
||||
// of the final bits of the BitVector:
|
||||
|
||||
boolean gotException = false;
|
||||
try {
|
||||
reader.deleteDocument(11);
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
gotException = true;
|
||||
}
|
||||
reader.close();
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
|
||||
// We must add more docs to get a new segment written
|
||||
for(int i=0;i<11;i++) {
|
||||
addDoc(writer, "aaa");
|
||||
}
|
||||
|
||||
// Without the fix for LUCENE-140 this call will
|
||||
// [incorrectly] hit a "docs out of order"
|
||||
// IllegalStateException because above out-of-bounds
|
||||
// deleteDocument corrupted the index:
|
||||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
if (!gotException) {
|
||||
fail("delete of out-of-bounds doc number failed to hit exception");
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testExceptionReleaseWriteLockJIRA768() throws IOException {
|
||||
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDoc(writer, "aaa");
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
try {
|
||||
reader.deleteDocument(1);
|
||||
fail("did not hit exception when deleting an invalid doc number");
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
// expected
|
||||
}
|
||||
reader.close();
|
||||
if (IndexWriter.isLocked(dir)) {
|
||||
fail("write lock is still held after close");
|
||||
}
|
||||
|
||||
reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(1, "content", sim.encodeNormValue(2.0f));
|
||||
fail("did not hit exception when calling setNorm on an invalid doc number");
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
// expected
|
||||
}
|
||||
reader.close();
|
||||
if (IndexWriter.isLocked(dir)) {
|
||||
fail("write lock is still held after close");
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testOpenReaderAfterDelete() throws IOException {
|
||||
File dirFile = _TestUtil.getTempDir("deletetest");
|
||||
Directory dir = newFSDirectory(dirFile);
|
||||
try {
|
||||
IndexReader.open(dir, false);
|
||||
IndexReader.open(dir);
|
||||
fail("expected FileNotFoundException");
|
||||
} catch (FileNotFoundException e) {
|
||||
// expected
|
||||
|
@@ -764,7 +447,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
// Make sure we still get a CorruptIndexException (not NPE):
|
||||
try {
|
||||
IndexReader.open(dir, false);
|
||||
IndexReader.open(dir);
|
||||
fail("expected FileNotFoundException");
|
||||
} catch (FileNotFoundException e) {
|
||||
// expected
|
||||
|
@@ -946,7 +629,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
SegmentInfos sis = new SegmentInfos();
|
||||
sis.read(d);
|
||||
IndexReader r = IndexReader.open(d, false);
|
||||
IndexReader r = IndexReader.open(d);
|
||||
IndexCommit c = r.getIndexCommit();
|
||||
|
||||
assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
|
||||
|
@@ -987,96 +670,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
d.close();
|
||||
}
|
||||
|
||||
public void testReadOnly() throws Throwable {
|
||||
Directory d = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
writer.commit();
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
||||
IndexReader r = IndexReader.open(d, true);
|
||||
try {
|
||||
r.deleteDocument(0);
|
||||
fail();
|
||||
} catch (UnsupportedOperationException uoe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
||||
// Make sure reopen is still readonly:
|
||||
IndexReader r2 = IndexReader.openIfChanged(r);
|
||||
assertNotNull(r2);
|
||||
r.close();
|
||||
|
||||
assertFalse(r == r2);
|
||||
|
||||
try {
|
||||
r2.deleteDocument(0);
|
||||
fail();
|
||||
} catch (UnsupportedOperationException uoe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
// Make sure reopen to a single segment is still readonly:
|
||||
IndexReader r3 = IndexReader.openIfChanged(r2);
|
||||
assertNotNull(r3);
|
||||
assertFalse(r3 == r2);
|
||||
r2.close();
|
||||
|
||||
assertFalse(r == r2);
|
||||
|
||||
try {
|
||||
r3.deleteDocument(0);
|
||||
fail();
|
||||
} catch (UnsupportedOperationException uoe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// Make sure write lock isn't held
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer(random))
|
||||
.setOpenMode(OpenMode.APPEND));
|
||||
writer.close();
|
||||
|
||||
r3.close();
|
||||
d.close();
|
||||
}
|
||||
|
||||
|
||||
// LUCENE-1474
|
||||
public void testIndexReader() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.addDocument(createDocument("a"));
|
||||
writer.addDocument(createDocument("b"));
|
||||
writer.addDocument(createDocument("c"));
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocuments(new Term("id", "a"));
|
||||
reader.flush();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.close();
|
||||
IndexReader.open(dir, true).close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
static Document createDocument(String id) {
|
||||
Document doc = new Document();
|
||||
FieldType customType = new FieldType(TextField.TYPE_STORED);
|
||||
|
@@ -1093,7 +686,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testNoDir() throws Throwable {
|
||||
Directory dir = newFSDirectory(_TestUtil.getTempDir("doesnotexist"));
|
||||
try {
|
||||
IndexReader.open(dir, true);
|
||||
IndexReader.open(dir);
|
||||
fail("did not hit expected exception");
|
||||
} catch (NoSuchDirectoryException nsde) {
|
||||
// expected
|
||||
|
@@ -1138,7 +731,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.close();
|
||||
|
||||
// Open reader
|
||||
IndexReader r = getOnlySegmentReader(IndexReader.open(dir, false));
|
||||
IndexReader r = getOnlySegmentReader(IndexReader.open(dir));
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r, "number", false);
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
@@ -1173,7 +766,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number", false);
|
||||
assertEquals(1, ints.length);
|
||||
|
@@ -1207,7 +800,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
assertEquals(36, r1.getUniqueTermCount());
|
||||
writer.addDocument(doc);
|
||||
|
@@ -1237,7 +830,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, null, true, -1);
|
||||
IndexReader r = IndexReader.open(dir, -1);
|
||||
try {
|
||||
r.docFreq(new Term("field", "f"));
|
||||
fail("did not hit expected exception");
|
||||
|
@@ -1282,7 +875,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.commit();
|
||||
Document doc = new Document();
|
||||
writer.addDocument(doc);
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
assertTrue(r.isCurrent());
|
||||
writer.addDocument(doc);
|
||||
writer.prepareCommit();
|
||||
|
|
|
@@ -17,14 +17,11 @@ package org.apache.lucene.index;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.Bits;
|
||||
|
||||
/**
|
||||
* Tests cloning multiple types of readers, modifying the liveDocs and norms
|
||||
|
@@ -32,468 +29,10 @@ import org.apache.lucene.util.Bits;
|
|||
* implemented properly
|
||||
*/
|
||||
public class TestIndexReaderClone extends LuceneTestCase {
|
||||
|
||||
public void testCloneReadOnlySegmentReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader reader = IndexReader.open(dir1, false);
|
||||
IndexReader readOnlyReader = reader.clone(true);
|
||||
if (!isReadOnly(readOnlyReader)) {
|
||||
fail("reader isn't read only");
|
||||
}
|
||||
if (deleteWorked(1, readOnlyReader)) {
|
||||
fail("deleting from the original should not have worked");
|
||||
}
|
||||
reader.close();
|
||||
readOnlyReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, clone to non-readOnly
|
||||
// reader2, make sure we can change reader2
|
||||
public void testCloneNoChangesStillReadOnly() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = r1.clone(false);
|
||||
if (!deleteWorked(1, r2)) {
|
||||
fail("deleting from the cloned should have worked");
|
||||
}
|
||||
r1.close();
|
||||
r2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, clone to non-readOnly
|
||||
// reader2, make sure we can change reader1
|
||||
public void testCloneWriteToOrig() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = r1.clone(false);
|
||||
if (!deleteWorked(1, r1)) {
|
||||
fail("deleting from the original should have worked");
|
||||
}
|
||||
r1.close();
|
||||
r2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, clone to non-readOnly
|
||||
// reader2, make sure we can change reader2
|
||||
public void testCloneWriteToClone() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = r1.clone(false);
|
||||
if (!deleteWorked(1, r2)) {
|
||||
fail("deleting from the original should have worked");
|
||||
}
|
||||
// should fail because reader1 holds the write lock
|
||||
assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
|
||||
r2.close();
|
||||
// should fail because we are now stale (reader1
|
||||
// committed changes)
|
||||
assertTrue("first reader should not be able to delete", !deleteWorked(1, r1));
|
||||
r1.close();
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// create single-segment index, open non-readOnly
|
||||
// SegmentReader, add docs, reopen to multireader, then do
|
||||
// delete
|
||||
public void testReopenSegmentReaderToMultiReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
|
||||
TestIndexReaderReopen.modifyIndex(5, dir1);
|
||||
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
assertTrue(reader1 != reader2);
|
||||
|
||||
assertTrue(deleteWorked(1, reader2));
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, clone to readOnly reader2
|
||||
public void testCloneWriteableToReadOnly() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader = IndexReader.open(dir1, false);
|
||||
IndexReader readOnlyReader = reader.clone(true);
|
||||
if (!isReadOnly(readOnlyReader)) {
|
||||
fail("reader isn't read only");
|
||||
}
|
||||
if (deleteWorked(1, readOnlyReader)) {
|
||||
fail("deleting from the original should not have worked");
|
||||
}
|
||||
// this readonly reader shouldn't have a write lock
|
||||
if (readOnlyReader.hasChanges) {
|
||||
fail("readOnlyReader has a write lock");
|
||||
}
|
||||
reader.close();
|
||||
readOnlyReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, reopen to readOnly reader2
|
||||
public void testReopenWriteableToReadOnly() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader = IndexReader.open(dir1, false);
|
||||
final int docCount = reader.numDocs();
|
||||
assertTrue(deleteWorked(1, reader));
|
||||
assertEquals(docCount-1, reader.numDocs());
|
||||
|
||||
IndexReader readOnlyReader = IndexReader.openIfChanged(reader, true);
|
||||
assertNotNull(readOnlyReader);
|
||||
if (!isReadOnly(readOnlyReader)) {
|
||||
fail("reader isn't read only");
|
||||
}
|
||||
assertFalse(deleteWorked(1, readOnlyReader));
|
||||
assertEquals(docCount-1, readOnlyReader.numDocs());
|
||||
reader.close();
|
||||
readOnlyReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open readOnly reader1, clone to non-readOnly reader2
|
||||
public void testCloneReadOnlyToWriteable() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader1 = IndexReader.open(dir1, true);
|
||||
|
||||
IndexReader reader2 = reader1.clone(false);
|
||||
if (isReadOnly(reader2)) {
|
||||
fail("reader should not be read only");
|
||||
}
|
||||
assertFalse("deleting from the original reader should not have worked", deleteWorked(1, reader1));
|
||||
// this readonly reader shouldn't yet have a write lock
|
||||
if (reader2.hasChanges) {
|
||||
fail("cloned reader should not have write lock");
|
||||
}
|
||||
assertTrue("deleting from the cloned reader should have worked", deleteWorked(1, reader2));
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1 on multi-segment index, then
|
||||
// fully merge the index, then clone to readOnly reader2
|
||||
public void testReadOnlyCloneAfterFullMerge() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.forceMerge(1);
|
||||
w.close();
|
||||
IndexReader reader2 = reader1.clone(true);
|
||||
assertTrue(isReadOnly(reader2));
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
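// Helper: returns true if deleting the given doc number from the reader succeeded without throwing.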
private static boolean deleteWorked(int doc, IndexReader r) {
|
||||
boolean exception = false;
|
||||
try {
|
||||
// trying to delete from the original reader should throw an exception
|
||||
r.deleteDocument(doc);
|
||||
} catch (Exception ex) {
|
||||
exception = true;
|
||||
}
|
||||
return !exception;
|
||||
}
|
||||
|
||||
public void testCloneReadOnlyDirectoryReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader = IndexReader.open(dir1, false);
|
||||
IndexReader readOnlyReader = reader.clone(true);
|
||||
if (!isReadOnly(readOnlyReader)) {
|
||||
fail("reader isn't read only");
|
||||
}
|
||||
reader.close();
|
||||
readOnlyReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
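// Helper: reports whether a SegmentReader or DirectoryReader was opened read-only; any other reader type is treated as writable.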
public static boolean isReadOnly(IndexReader r) {
|
||||
if (r instanceof SegmentReader) {
|
||||
return ((SegmentReader) r).readOnly;
|
||||
} else if (r instanceof DirectoryReader) {
|
||||
return ((DirectoryReader) r).readOnly;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public void testParallelReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir2, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = IndexReader.open(dir2, false);
|
||||
|
||||
ParallelReader pr1 = new ParallelReader();
|
||||
pr1.add(r1);
|
||||
pr1.add(r2);
|
||||
|
||||
performDefaultTests(pr1);
|
||||
pr1.close();
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* 1. Get a norm from the original reader 2. Clone the original reader 3.
|
||||
* Delete a document and set the norm of the cloned reader 4. Verify the norms
|
||||
* are not the same on each reader 5. Verify the doc deleted is only in the
|
||||
* cloned reader 6. Try to delete a document in the original reader, an
|
||||
* exception should be thrown
|
||||
*
|
||||
* @param r1 IndexReader to perform tests on
|
||||
* @throws Exception
|
||||
*/
|
||||
private void performDefaultTests(IndexReader r1) throws Exception {
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
float norm1 = sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
|
||||
|
||||
IndexReader pr1Clone = (IndexReader) r1.clone();
|
||||
pr1Clone.deleteDocument(10);
|
||||
pr1Clone.setNorm(4, "field1", sim.encodeNormValue(0.5f));
|
||||
assertTrue(sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1);
|
||||
assertTrue(sim.decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1);
|
||||
|
||||
final Bits liveDocs = MultiFields.getLiveDocs(r1);
|
||||
assertTrue(liveDocs == null || liveDocs.get(10));
|
||||
assertFalse(MultiFields.getLiveDocs(pr1Clone).get(10));
|
||||
|
||||
// try to update the original reader, which should throw an exception
|
||||
try {
|
||||
r1.deleteDocument(11);
|
||||
fail("Tried to delete doc 11 and an exception should have been thrown");
|
||||
} catch (Exception exception) {
|
||||
// expected
|
||||
}
|
||||
pr1Clone.close();
|
||||
}
|
||||
|
||||
public void testMixedReaders() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir2, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = IndexReader.open(dir2, false);
|
||||
|
||||
MultiReader multiReader = new MultiReader(r1, r2);
|
||||
performDefaultTests(multiReader);
|
||||
multiReader.close();
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
public void testSegmentReaderUndeleteall() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
origSegmentReader.deleteDocument(10);
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
origSegmentReader.undeleteAll();
|
||||
assertNull(origSegmentReader.liveDocsRef);
|
||||
origSegmentReader.close();
|
||||
// need to test norms?
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testSegmentReaderCloseReferencing() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
origSegmentReader.deleteDocument(1);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
origSegmentReader.setNorm(4, "field1", sim.encodeNormValue(0.5f));
|
||||
|
||||
SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
|
||||
.clone();
|
||||
assertDelDocsRefCountEquals(2, origSegmentReader);
|
||||
origSegmentReader.close();
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
// check the norm refs
|
||||
SegmentNorms norm = clonedSegmentReader.norms.get("field1");
|
||||
assertEquals(1, norm.bytesRef().get());
|
||||
clonedSegmentReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testSegmentReaderDelDocsReferenceCounting() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
|
||||
IndexReader origReader = IndexReader.open(dir1, false);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(origReader);
|
||||
// liveDocsRef should be null because nothing has been updated yet
|
||||
assertNull(origSegmentReader.liveDocsRef);
|
||||
|
||||
// we deleted a document, so there is now a liveDocs bitvector and a
|
||||
// reference to it
|
||||
origReader.deleteDocument(1);
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
|
||||
// the cloned segmentreader should have 2 references, 1 to itself, and 1 to
|
||||
// the original segmentreader
|
||||
IndexReader clonedReader = (IndexReader) origReader.clone();
|
||||
SegmentReader clonedSegmentReader = getOnlySegmentReader(clonedReader);
|
||||
assertDelDocsRefCountEquals(2, origSegmentReader);
|
||||
// deleting a document creates a new liveDocs bitvector, the ref count goes to
|
||||
// 1
|
||||
clonedReader.deleteDocument(2);
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
assertDelDocsRefCountEquals(1, clonedSegmentReader);
|
||||
|
||||
// make sure the deletedocs objects are different (copy
|
||||
// on write)
|
||||
assertTrue(origSegmentReader.liveDocs != clonedSegmentReader.liveDocs);
|
||||
|
||||
assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
|
||||
final Bits liveDocs = origSegmentReader.getLiveDocs();
|
||||
assertTrue(liveDocs == null || liveDocs.get(2)); // doc 2 should not be deleted
|
||||
// in original segmentreader
|
||||
assertFalse(clonedSegmentReader.getLiveDocs().get(2)); // doc 2 should be deleted in
|
||||
// cloned segmentreader
|
||||
|
||||
// deleting a doc from the original segmentreader should throw an exception
|
||||
try {
|
||||
origReader.deleteDocument(4);
|
||||
fail("expected exception");
|
||||
} catch (LockObtainFailedException lbfe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
origReader.close();
|
||||
// try closing the original segment reader to see if it affects the
|
||||
// clonedSegmentReader
|
||||
clonedReader.deleteDocument(3);
|
||||
clonedReader.flush();
|
||||
assertDelDocsRefCountEquals(1, clonedSegmentReader);
|
||||
|
||||
// test a reopened reader
|
||||
IndexReader reopenedReader = IndexReader.openIfChanged(clonedReader);
|
||||
if (reopenedReader == null) {
|
||||
reopenedReader = clonedReader;
|
||||
}
|
||||
IndexReader cloneReader2 = (IndexReader) reopenedReader.clone();
|
||||
SegmentReader cloneSegmentReader2 = getOnlySegmentReader(cloneReader2);
|
||||
assertDelDocsRefCountEquals(2, cloneSegmentReader2);
|
||||
clonedReader.close();
|
||||
reopenedReader.close();
|
||||
cloneReader2.close();
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// LUCENE-1648
|
||||
public void testCloneWithDeletes() throws Throwable {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader origReader = IndexReader.open(dir1, false);
|
||||
origReader.deleteDocument(1);
|
||||
|
||||
IndexReader clonedReader = (IndexReader) origReader.clone();
|
||||
origReader.close();
|
||||
clonedReader.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir1, false);
|
||||
assertFalse(MultiFields.getLiveDocs(r).get(1));
|
||||
r.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// LUCENE-1648
|
||||
public void testCloneWithSetNorm() throws Throwable {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader orig = IndexReader.open(dir1, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
orig.setNorm(1, "field1", sim.encodeNormValue(17.0f));
|
||||
final byte encoded = sim.encodeNormValue(17.0f);
|
||||
assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]);
|
||||
|
||||
// the cloned segmentreader should have 2 references, 1 to itself, and 1 to
|
||||
// the original segmentreader
|
||||
IndexReader clonedReader = (IndexReader) orig.clone();
|
||||
orig.close();
|
||||
clonedReader.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir1, false);
|
||||
assertEquals(encoded, MultiNorms.norms(r, "field1")[1]);
|
||||
r.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
|
||||
int doc) {
|
||||
assertEquals(reader.getLiveDocs().get(doc), reader2.getLiveDocs().get(doc));
|
||||
}
|
||||
|
||||
private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
|
||||
assertEquals(refCount, reader.liveDocsRef.get());
|
||||
}
|
||||
|
||||
public void testCloneSubreaders() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
IndexReader reader = IndexReader.open(dir1, false);
|
||||
reader.deleteDocument(1); // acquire write lock
|
||||
IndexReader[] subs = reader.getSequentialSubReaders();
|
||||
assert subs.length > 1;
|
||||
|
||||
IndexReader[] clones = new IndexReader[subs.length];
|
||||
for (int x=0; x < subs.length; x++) {
|
||||
clones[x] = (IndexReader) subs[x].clone();
|
||||
}
|
||||
reader.close();
|
||||
for (int x=0; x < subs.length; x++) {
|
||||
clones[x].close();
|
||||
}
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testLucene1516Bug() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
r1.incRef();
|
||||
IndexReader r2 = r1.clone(false);
|
||||
r1.deleteDocument(5);
|
||||
r1.decRef();
|
||||
|
||||
r1.incRef();
|
||||
|
||||
r2.close();
|
||||
r1.decRef();
|
||||
r1.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testCloseStoredFields() throws Exception {
|
||||
final Directory dir = newDirectory();
|
||||
|
@@ -506,8 +45,8 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
doc.add(newField("field", "yes it's stored", TextField.TYPE_STORED));
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
IndexReader r1 = IndexReader.open(dir, false);
|
||||
IndexReader r2 = r1.clone(false);
|
||||
IndexReader r1 = IndexReader.open(dir);
|
||||
IndexReader r2 = (IndexReader) r1.clone();
|
||||
r1.close();
|
||||
r2.close();
|
||||
dir.close();
|
||||
|
|
|
@@ -1,362 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Random;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests cloning IndexReader norms
|
||||
*/
|
||||
public class TestIndexReaderCloneNorms extends LuceneTestCase {
|
||||
|
||||
private class SimilarityProviderOne extends DefaultSimilarityProvider {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
// disable length norm
|
||||
return encodeNormValue(state.getBoost());
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static final int NUM_FIELDS = 10;
|
||||
|
||||
private SimilarityProvider similarityProviderOne;
|
||||
|
||||
private Analyzer anlzr;
|
||||
|
||||
private int numDocNorms;
|
||||
|
||||
private ArrayList<Float> norms;
|
||||
|
||||
private ArrayList<Float> modifiedNorms;
|
||||
|
||||
private float lastNorm = 0;
|
||||
|
||||
private float normDelta = (float) 0.001;
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
similarityProviderOne = new SimilarityProviderOne();
|
||||
anlzr = new MockAnalyzer(random);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that norms values are preserved as the index is maintained. Including
|
||||
* separate norms. Including merging indexes with separate norms. Including
|
||||
* full merge.
|
||||
*/
|
||||
public void testNorms() throws IOException {
|
||||
// test with a single index: index1
|
||||
Directory dir1 = newDirectory();
|
||||
IndexWriter.unlock(dir1);
|
||||
|
||||
norms = new ArrayList<Float>();
|
||||
modifiedNorms = new ArrayList<Float>();
|
||||
|
||||
createIndex(random, dir1);
|
||||
doTestNorms(random, dir1);
|
||||
|
||||
// test with a single index: index2
|
||||
ArrayList<Float> norms1 = norms;
|
||||
ArrayList<Float> modifiedNorms1 = modifiedNorms;
|
||||
int numDocNorms1 = numDocNorms;
|
||||
|
||||
norms = new ArrayList<Float>();
|
||||
modifiedNorms = new ArrayList<Float>();
|
||||
numDocNorms = 0;
|
||||
|
||||
Directory dir2 = newDirectory();
|
||||
|
||||
createIndex(random, dir2);
|
||||
doTestNorms(random, dir2);
|
||||
|
||||
// add index1 and index2 to a third index: index3
|
||||
Directory dir3 = newDirectory();
|
||||
|
||||
createIndex(random, dir3);
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now addIndexes/full merge");
|
||||
}
|
||||
IndexWriter iw = new IndexWriter(
|
||||
dir3,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(5).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
iw.addIndexes(dir1, dir2);
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
|
||||
norms1.addAll(norms);
|
||||
norms = norms1;
|
||||
modifiedNorms1.addAll(modifiedNorms);
|
||||
modifiedNorms = modifiedNorms1;
|
||||
numDocNorms += numDocNorms1;
|
||||
|
||||
// test with index3
|
||||
verifyIndex(dir3);
|
||||
doTestNorms(random, dir3);
|
||||
|
||||
// now with full merge
|
||||
iw = new IndexWriter(
|
||||
dir3,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(5).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
verifyIndex(dir3);
|
||||
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
dir3.close();
|
||||
}
|
||||
|
||||
// try cloning and reopening the norms
|
||||
private void doTestNorms(Random random, Directory dir) throws IOException {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now doTestNorms");
|
||||
}
|
||||
addDocs(random, dir, 12, true);
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
verifyIndex(ir);
|
||||
modifyNormsForF1(ir);
|
||||
IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir, false);//ir.clone();
|
||||
verifyIndex(irc);
|
||||
|
||||
modifyNormsForF1(irc);
|
||||
|
||||
IndexReader irc3 = (IndexReader) irc.clone();
|
||||
verifyIndex(irc3);
|
||||
modifyNormsForF1(irc3);
|
||||
verifyIndex(irc3);
|
||||
irc3.flush();
|
||||
|
||||
ir.close();
|
||||
irc.close();
|
||||
irc3.close();
|
||||
}
|
||||
|
||||
public void testNormsClose() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader reader1 = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
reader1.norms("field1");
|
||||
SegmentNorms r1norm = reader1.norms.get("field1");
|
||||
AtomicInteger r1BytesRef = r1norm.bytesRef();
|
||||
SegmentReader reader2 = (SegmentReader)reader1.clone();
|
||||
assertEquals(2, r1norm.bytesRef().get());
|
||||
reader1.close();
|
||||
assertEquals(1, r1BytesRef.get());
|
||||
reader2.norms("field1");
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testNormsRefCounting() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
|
||||
IndexReader reader2C = (IndexReader) reader1.clone();
|
||||
SegmentReader segmentReader2C = getOnlySegmentReader(reader2C);
|
||||
segmentReader2C.norms("field1"); // load the norms for the field
|
||||
SegmentNorms reader2CNorm = segmentReader2C.norms.get("field1");
|
||||
assertTrue("reader2CNorm.bytesRef()=" + reader2CNorm.bytesRef(), reader2CNorm.bytesRef().get() == 2);
|
||||
|
||||
|
||||
|
||||
IndexReader reader3C = (IndexReader) reader2C.clone();
|
||||
SegmentReader segmentReader3C = getOnlySegmentReader(reader3C);
|
||||
SegmentNorms reader3CCNorm = segmentReader3C.norms.get("field1");
|
||||
assertEquals(3, reader3CCNorm.bytesRef().get());
|
||||
|
||||
// edit a norm and the refcount should be 1
|
||||
IndexReader reader4C = (IndexReader) reader3C.clone();
|
||||
SegmentReader segmentReader4C = getOnlySegmentReader(reader4C);
|
||||
assertEquals(4, reader3CCNorm.bytesRef().get());
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader4C.setNorm(5, "field1", sim.encodeNormValue(0.33f));
|
||||
|
||||
// generate a cannot update exception in reader1
|
||||
try {
|
||||
reader3C.setNorm(1, "field1", sim.encodeNormValue(0.99f));
|
||||
fail("did not hit expected exception");
|
||||
} catch (Exception ex) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// norm values should be different
|
||||
assertTrue(sim.decodeNormValue(segmentReader3C.norms("field1")[5])
|
||||
!= sim.decodeNormValue(segmentReader4C.norms("field1")[5]));
|
||||
SegmentNorms reader4CCNorm = segmentReader4C.norms.get("field1");
|
||||
assertEquals(3, reader3CCNorm.bytesRef().get());
|
||||
assertEquals(1, reader4CCNorm.bytesRef().get());
|
||||
|
||||
IndexReader reader5C = (IndexReader) reader4C.clone();
|
||||
SegmentReader segmentReader5C = getOnlySegmentReader(reader5C);
|
||||
SegmentNorms reader5CCNorm = segmentReader5C.norms.get("field1");
|
||||
reader5C.setNorm(5, "field1", sim.encodeNormValue(0.7f));
|
||||
assertEquals(1, reader5CCNorm.bytesRef().get());
|
||||
|
||||
reader5C.close();
|
||||
reader4C.close();
|
||||
reader3C.close();
|
||||
reader2C.close();
|
||||
reader1.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void createIndex(Random random, Directory dir) throws IOException {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: createIndex");
|
||||
}
|
||||
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
|
||||
.setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
|
||||
lmp.setMergeFactor(3);
|
||||
lmp.setUseCompoundFile(true);
|
||||
iw.close();
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: done createIndex");
|
||||
}
|
||||
}
|
||||
|
||||
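// Swaps the norm values of field "f1" between pairs of documents (every third doc), keeping the
// expected values tracked in modifiedNorms in sync.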
private void modifyNormsForF1(IndexReader ir) throws IOException {
|
||||
int n = ir.maxDoc();
|
||||
// System.out.println("modifyNormsForF1 maxDoc: "+n);
|
||||
for (int i = 0; i < n; i += 3) { // modify for every third doc
|
||||
int k = (i * 3) % modifiedNorms.size();
|
||||
float origNorm = modifiedNorms.get(i).floatValue();
|
||||
float newNorm = modifiedNorms.get(k).floatValue();
|
||||
// System.out.println("Modifying: for "+i+" from "+origNorm+" to
|
||||
// "+newNorm);
|
||||
// System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
|
||||
modifiedNorms.set(i, Float.valueOf(newNorm));
|
||||
modifiedNorms.set(k, Float.valueOf(origNorm));
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
ir.setNorm(i, "f" + 1, sim.encodeNormValue(newNorm));
|
||||
ir.setNorm(k, "f" + 1, sim.encodeNormValue(origNorm));
|
||||
// System.out.println("setNorm i: "+i);
|
||||
// break;
|
||||
}
|
||||
// ir.close();
|
||||
}
|
||||
|
||||
private void verifyIndex(Directory dir) throws IOException {
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
verifyIndex(ir);
|
||||
ir.close();
|
||||
}
|
||||
|
||||
private void verifyIndex(IndexReader ir) throws IOException {
|
||||
for (int i = 0; i < NUM_FIELDS; i++) {
|
||||
String field = "f" + i;
|
||||
byte b[] = MultiNorms.norms(ir, field);
|
||||
assertEquals("number of norms mismatches", numDocNorms, b.length);
|
||||
ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
|
||||
for (int j = 0; j < b.length; j++) {
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
float norm = sim.decodeNormValue(b[j]);
|
||||
float norm1 = storedNorms.get(j).floatValue();
|
||||
assertEquals("stored norm value of " + field + " for doc " + j + " is "
|
||||
+ norm + " - a mismatch!", norm, norm1, 0.000001);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void addDocs(Random random, Directory dir, int ndocs, boolean compound)
|
||||
throws IOException {
|
||||
IndexWriterConfig conf = newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
|
||||
.setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy());
|
||||
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
|
||||
lmp.setMergeFactor(3);
|
||||
lmp.setUseCompoundFile(compound);
|
||||
IndexWriter iw = new IndexWriter(dir, conf);
|
||||
for (int i = 0; i < ndocs; i++) {
|
||||
iw.addDocument(newDoc());
|
||||
}
|
||||
iw.close();
|
||||
}
|
||||
|
||||
// create the next document
|
||||
private Document newDoc() {
|
||||
Document d = new Document();
|
||||
float boost = nextNorm("anyfield"); // in this test the same similarity is used for all fields so it does not matter what field is passed
|
||||
|
||||
FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
|
||||
customType.setTokenized(false);
|
||||
for (int i = 0; i < 10; i++) {
|
||||
Field f = newField("f" + i, "v" + i, customType);
|
||||
f.setBoost(boost);
|
||||
d.add(f);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
||||
// return unique norm values that are unchanged by encoding/decoding
|
||||
private float nextNorm(String fname) {
|
||||
float norm = lastNorm + normDelta;
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
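// keep bumping the value until the encode/decode round trip yields a new value larger than the last one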
do {
|
||||
float norm1 = sim.decodeNormValue(
|
||||
sim.encodeNormValue(norm));
|
||||
if (norm1 > lastNorm) {
|
||||
// System.out.println(norm1+" > "+lastNorm);
|
||||
norm = norm1;
|
||||
break;
|
||||
}
|
||||
norm += normDelta;
|
||||
} while (true);
|
||||
norms.add(numDocNorms, Float.valueOf(norm));
|
||||
modifiedNorms.add(numDocNorms, Float.valueOf(norm));
|
||||
// System.out.println("creating norm("+numDocNorms+"): "+norm);
|
||||
numDocNorms++;
|
||||
lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct
|
||||
// values can be stored in a single byte
|
||||
return norm;
|
||||
}
|
||||
}
|
|
@@ -1,375 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
import static org.apache.lucene.index.TestIndexReader.addDoc;
|
||||
import static org.apache.lucene.index.TestIndexReader.addDocumentWithFields;
|
||||
import static org.apache.lucene.index.TestIndexReader.assertTermDocsCount;
|
||||
import static org.apache.lucene.index.TestIndexReader.createDocument;
|
||||
|
||||
public class TestIndexReaderDelete extends LuceneTestCase {
|
||||
private void deleteReaderReaderConflict(boolean doFullMerge) throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm1 = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
Term searchTerm3 = new Term("content", "ccc");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
// add 100 documents with term : bbb
|
||||
// add 100 documents with term : ccc
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm1.text());
|
||||
addDoc(writer, searchTerm2.text());
|
||||
addDoc(writer, searchTerm3.text());
|
||||
}
|
||||
if (doFullMerge) {
|
||||
writer.forceMerge(1);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN TWO READERS
|
||||
// Both readers get the segment info as it exists at this time
|
||||
IndexReader reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("first opened", reader2, searchTerm1, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("first opened", reader2, searchTerm3, 100);
|
||||
|
||||
// DELETE DOCS FROM READER 2 and CLOSE IT
|
||||
// delete documents containing term: aaa
|
||||
// when the reader is closed, the segment info is updated and
|
||||
// the first reader is now stale
|
||||
reader2.deleteDocuments(searchTerm1);
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
// Make sure reader 1 is unchanged since it was open earlier
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
|
||||
|
||||
|
||||
// ATTEMPT TO DELETE FROM STALE READER
|
||||
// delete documents containing term: bbb
|
||||
try {
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
fail("Delete allowed from a stale index reader");
|
||||
} catch (IOException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// RECREATE READER AND TRY AGAIN
|
||||
reader1.close();
|
||||
reader1 = IndexReader.open(dir, false);
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("reopened", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
|
||||
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
|
||||
|
||||
reader1.deleteDocuments(searchTerm2);
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
|
||||
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
|
||||
assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
|
||||
reader1.close();
|
||||
|
||||
// Open another reader to confirm that everything is deleted
|
||||
reader2 = IndexReader.open(dir, false);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
|
||||
assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
|
||||
reader2.close();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void deleteReaderWriterConflict(boolean doFullMerge) throws IOException {
|
||||
//Directory dir = new RAMDirectory();
|
||||
Directory dir = newDirectory();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
Term searchTerm2 = new Term("content", "bbb");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
// add 100 documents with term : bbb
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm2.text());
|
||||
}
|
||||
|
||||
// REQUEST full merge
|
||||
// This causes a new segment to become current for all subsequent
|
||||
// searchers. Because of this, deletions made via a previously open
|
||||
// reader, which would be applied to that reader's segment, are lost
|
||||
// for subsequent searchers/readers
|
||||
if (doFullMerge) {
|
||||
writer.forceMerge(1);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// The reader should not see the new data
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
// NOTE: the reader was created when only "aaa" documents were in the index
|
||||
int deleted = 0;
|
||||
try {
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
fail("Delete allowed on an index reader with stale segment information");
|
||||
} catch (StaleReaderException e) {
|
||||
/* success */
|
||||
}
|
||||
|
||||
// Re-open index reader and try again. This time it should see
|
||||
// the new data.
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
assertTermDocsCount("first reader", reader, searchTerm2, 100);
|
||||
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testBasicDelete() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < 100; i++) {
|
||||
addDoc(writer, searchTerm.text());
|
||||
}
|
||||
writer.close();
|
||||
|
||||
// OPEN READER AT THIS POINT - this should fix the view of the
|
||||
// index at the point of having 100 "aaa" documents and 0 "bbb"
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("first reader", reader, searchTerm, 100);
|
||||
reader.close();
|
||||
|
||||
// DELETE DOCUMENTS CONTAINING TERM: aaa
|
||||
int deleted = 0;
|
||||
reader = IndexReader.open(dir, false);
|
||||
deleted = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("deleted count", 100, deleted);
|
||||
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
|
||||
// open a 2nd reader to make sure first reader can
|
||||
// commit its changes (.del) while second reader
|
||||
// is open:
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
reader.close();
|
||||
|
||||
// CREATE A NEW READER and re-test
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals("deleted docFreq", 0, reader.docFreq(searchTerm));
|
||||
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
|
||||
reader.close();
|
||||
reader2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictNoFullMerge() throws IOException {
|
||||
deleteReaderReaderConflict(false);
|
||||
}
|
||||
|
||||
public void testDeleteReaderReaderConflictFullMerge() throws IOException {
|
||||
deleteReaderReaderConflict(true);
|
||||
}
|
||||
|
||||
public void testDeleteReaderWriterConflictNoFullMerge() throws IOException {
|
||||
deleteReaderWriterConflict(false);
|
||||
}
|
||||
|
||||
public void testDeleteReaderWriterConflictFullMerge() throws IOException {
|
||||
deleteReaderWriterConflict(true);
|
||||
}
|
||||
|
||||
public void testMultiReaderDeletes() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
Document doc = new Document();
|
||||
doc.add(newField("f", "doctor", StringField.TYPE_UNSTORED));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
w.commit();
|
||||
doc.add(newField("f", "who", StringField.TYPE_UNSTORED));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = new SlowMultiReaderWrapper(w.getReader());
|
||||
w.close();
|
||||
|
||||
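// no documents have been deleted yet, so no live-docs bitset has been allocated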
assertNull(r.getLiveDocs());
|
||||
r.close();
|
||||
|
||||
r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));
|
||||
|
||||
assertNull(r.getLiveDocs());
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
|
||||
assertNotNull(r.getLiveDocs());
|
||||
assertFalse(r.getLiveDocs().get(0));
|
||||
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
|
||||
assertFalse(r.getLiveDocs().get(1));
|
||||
r.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAll() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.deleteDocument(1);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterClose() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(0);
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.undeleteAll();
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1647
|
||||
public void testIndexReaderUnDeleteAll() throws Exception {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
writer.addDocument(createDocument("a"));
|
||||
writer.addDocument(createDocument("b"));
|
||||
writer.addDocument(createDocument("c"));
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocuments(new Term("id", "a"));
|
||||
reader.flush();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.undeleteAll();
|
||||
reader.deleteDocuments(new Term("id", "b"));
|
||||
reader.close();
|
||||
IndexReader.open(dir,true).close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@@ -1,229 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReaderOnDiskFull extends LuceneTestCase {
|
||||
/**
|
||||
* Make sure if reader tries to commit but hits disk
|
||||
* full that reader remains consistent and usable.
|
||||
*/
|
||||
public void testDiskFull() throws IOException {
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
int START_COUNT = 157;
|
||||
int END_COUNT = 144;
|
||||
|
||||
// First build up a starting index:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: create initial index");
|
||||
}
|
||||
for(int i=0;i<157;i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", Integer.toString(i), StringField.TYPE_STORED));
|
||||
d.add(newField("content", "aaa " + i, TextField.TYPE_UNSTORED));
|
||||
writer.addDocument(d);
|
||||
if (0==i%10)
|
||||
writer.commit();
|
||||
}
|
||||
writer.close();
|
||||
|
||||
{
|
||||
IndexReader r = IndexReader.open(startDir);
|
||||
IndexSearcher searcher = newSearcher(r);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("exception when init searching: " + e);
|
||||
}
|
||||
searcher.close();
|
||||
r.close();
|
||||
}
|
||||
|
||||
long diskUsage = startDir.getRecomputedActualSizeInBytes();
|
||||
long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
|
||||
|
||||
IOException err = null;
|
||||
|
||||
boolean done = false;
|
||||
boolean gotExc = false;
|
||||
|
||||
// Iterate w/ ever increasing free disk space:
|
||||
while(!done) {
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random)));
|
||||
|
||||
// If IndexReader hits disk full, it can write to
|
||||
// the same files again.
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// For each disk size, first try to commit against
|
||||
// dir that will hit random IOExceptions & disk
|
||||
// full; after, give it infinite disk space & turn
|
||||
// off random IOExceptions & retry w/ same reader:
|
||||
boolean success = false;
|
||||
|
||||
for(int x=0;x<2;x++) {
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
String testName;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: " + diskFree + " bytes");
|
||||
}
|
||||
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: same writer: unlimited disk space");
|
||||
}
|
||||
testName = "reader re-use after disk full";
|
||||
}
|
||||
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
if (0 == x) {
|
||||
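// queue up deletes and norm updates on every 12th doc; they are only written to the index when the reader is closed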
int docId = 12;
|
||||
for(int i=0;i<13;i++) {
|
||||
reader.deleteDocument(docId);
|
||||
reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
|
||||
docId += 12;
|
||||
}
|
||||
}
|
||||
reader.close();
|
||||
success = true;
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
err = e;
|
||||
gotExc = true;
|
||||
if (1 == x) {
|
||||
e.printStackTrace();
|
||||
fail(testName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs changed, and if
|
||||
// we failed, we see either all docs or no docs
|
||||
// changed (transactional semantics):
|
||||
IndexReader newReader = null;
|
||||
try {
|
||||
newReader = IndexReader.open(dir, false);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
|
||||
}
|
||||
/*
|
||||
int result = newReader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
IndexSearcher searcher = newSearcher(newReader);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != START_COUNT && result2 != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
newReader.close();
|
||||
|
||||
if (result2 == END_COUNT) {
|
||||
if (!gotExc)
|
||||
fail("never hit disk full");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with more bytes of free space:
|
||||
diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 5, 20) : _TestUtil.nextInt(random, 50, 200);
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
}
|
||||
}
|
|
@@ -62,7 +62,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir1, false);
|
||||
return IndexReader.open(dir1);
|
||||
}
|
||||
|
||||
});
|
||||
|
@@ -80,7 +80,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir2, false);
|
||||
return IndexReader.open(dir2);
|
||||
}
|
||||
|
||||
});
|
||||
|
@@ -104,8 +104,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir1, false));
|
||||
pr.add(IndexReader.open(dir2, false));
|
||||
pr.add(IndexReader.open(dir1));
|
||||
pr.add(IndexReader.open(dir2));
|
||||
return pr;
|
||||
}
|
||||
|
||||
|
@@ -129,11 +129,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir3, false));
|
||||
pr.add(IndexReader.open(dir4, false));
|
||||
pr.add(IndexReader.open(dir3));
|
||||
pr.add(IndexReader.open(dir4));
|
||||
// Does not implement reopen, so
|
||||
// hits exception:
|
||||
pr.add(new FilterIndexReader(IndexReader.open(dir3, false)));
|
||||
pr.add(new FilterIndexReader(IndexReader.open(dir3)));
|
||||
return pr;
|
||||
}
|
||||
|
||||
|
@@ -163,7 +163,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(
|
||||
OpenMode.CREATE).setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(newLogMergePolicy()));
|
||||
iwriter.commit();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
try {
|
||||
int M = 3;
|
||||
FieldType customType = new FieldType(TextField.TYPE_STORED);
|
||||
|
@@ -200,7 +200,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
} else {
|
||||
// recreate
|
||||
reader.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
|
@@ -226,8 +226,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return new MultiReader(IndexReader.open(dir1, false),
|
||||
IndexReader.open(dir2, false));
|
||||
return new MultiReader(IndexReader.open(dir1),
|
||||
IndexReader.open(dir2));
|
||||
}
|
||||
|
||||
});
|
||||
|
@@ -251,11 +251,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return new MultiReader(IndexReader.open(dir3, false),
|
||||
IndexReader.open(dir4, false),
|
||||
return new MultiReader(IndexReader.open(dir3),
|
||||
IndexReader.open(dir4),
|
||||
// Does not implement reopen, so
|
||||
// hits exception:
|
||||
new FilterIndexReader(IndexReader.open(dir3, false)));
|
||||
new FilterIndexReader(IndexReader.open(dir3)));
|
||||
}
|
||||
|
||||
});
|
||||
|
@@ -279,20 +279,15 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
// only change norms in this index to maintain the same number of docs for each of ParallelReader's subreaders
|
||||
if (i == 1) TestIndexReaderReopen.modifyIndex(i, dir1);
|
||||
|
||||
TestIndexReaderReopen.modifyIndex(i, dir4);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir5);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir1, false));
|
||||
pr.add(IndexReader.open(dir2, false));
|
||||
MultiReader mr = new MultiReader(IndexReader.open(dir3, false), IndexReader.open(dir4, false));
|
||||
return new MultiReader(pr, mr, IndexReader.open(dir5, false));
|
||||
MultiReader mr1 = new MultiReader(IndexReader.open(dir1), IndexReader.open(dir2));
|
||||
MultiReader mr2 = new MultiReader(IndexReader.open(dir3), IndexReader.open(dir4));
|
||||
return new MultiReader(mr1, mr2, IndexReader.open(dir5));
|
||||
}
|
||||
});
|
||||
dir1.close();
|
||||
|
@@ -347,111 +342,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
assertReaderClosed(index1, true, true);
|
||||
assertReaderClosed(index2, true, true);
|
||||
}
|
||||
|
||||
public void testReferenceCounting() throws IOException {
|
||||
for (int mode = 0; mode < 4; mode++) {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, true);
|
||||
|
||||
IndexReader reader0 = IndexReader.open(dir1, false);
|
||||
assertRefCountEquals(1, reader0);
|
||||
|
||||
assertTrue(reader0 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders0 = reader0.getSequentialSubReaders();
|
||||
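// a freshly opened DirectoryReader holds a single reference to each of its segment readers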
for (int i = 0; i < subReaders0.length; i++) {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
}
|
||||
|
||||
// delete first document, so that only one of the subReaders has to be re-opened
|
||||
IndexReader modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(0);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader1 = refreshReader(reader0, true).refreshedReader;
|
||||
assertTrue(reader1 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders1 = reader1.getSequentialSubReaders();
|
||||
assertEquals(subReaders0.length, subReaders1.length);
|
||||
|
||||
for (int i = 0; i < subReaders0.length; i++) {
|
||||
if (subReaders0[i] != subReaders1[i]) {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
assertRefCountEquals(1, subReaders1[i]);
|
||||
} else {
|
||||
assertRefCountEquals(2, subReaders0[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// delete second document, so that only one of the subReaders has to be re-opened
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(1);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader2 = refreshReader(reader1, true).refreshedReader;
|
||||
assertTrue(reader2 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders2 = reader2.getSequentialSubReaders();
|
||||
assertEquals(subReaders1.length, subReaders2.length);
|
||||
|
||||
for (int i = 0; i < subReaders2.length; i++) {
|
||||
if (subReaders2[i] == subReaders1[i]) {
|
||||
if (subReaders1[i] == subReaders0[i]) {
|
||||
assertRefCountEquals(3, subReaders2[i]);
|
||||
} else {
|
||||
assertRefCountEquals(2, subReaders2[i]);
|
||||
}
|
||||
} else {
|
||||
assertRefCountEquals(1, subReaders2[i]);
|
||||
if (subReaders0[i] == subReaders1[i]) {
|
||||
assertRefCountEquals(2, subReaders2[i]);
|
||||
assertRefCountEquals(2, subReaders0[i]);
|
||||
} else {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
assertRefCountEquals(1, subReaders1[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
IndexReader reader3 = refreshReader(reader0, true).refreshedReader;
|
||||
assertTrue(reader3 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders3 = reader3.getSequentialSubReaders();
|
||||
assertEquals(subReaders3.length, subReaders0.length);
|
||||
|
||||
// try some permutations
|
||||
switch (mode) {
|
||||
case 0:
|
||||
reader0.close();
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
reader3.close();
|
||||
break;
|
||||
case 1:
|
||||
reader3.close();
|
||||
reader2.close();
|
||||
reader1.close();
|
||||
reader0.close();
|
||||
break;
|
||||
case 2:
|
||||
reader2.close();
|
||||
reader3.close();
|
||||
reader0.close();
|
||||
reader1.close();
|
||||
break;
|
||||
case 3:
|
||||
reader1.close();
|
||||
reader3.close();
|
||||
reader2.close();
|
||||
reader0.close();
|
||||
break;
|
||||
}
|
||||
|
||||
assertReaderClosed(reader0, true, true);
|
||||
assertReaderClosed(reader1, true, true);
|
||||
assertReaderClosed(reader2, true, true);
|
||||
assertReaderClosed(reader3, true, true);
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void testReferenceCountingMultiReader() throws IOException {
|
||||
for (int mode = 0; mode <=1; mode++) {
|
||||
|
@@ -460,10 +350,10 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
IndexReader reader1 = IndexReader.open(dir1);
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
IndexReader initReader2 = IndexReader.open(dir2, false);
|
||||
IndexReader initReader2 = IndexReader.open(dir2);
|
||||
IndexReader multiReader1 = new MultiReader(new IndexReader[] {reader1, initReader2}, (mode == 0));
|
||||
modifyIndex(0, dir2);
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
@@ -527,160 +417,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
public void testReferenceCountingParallelReader() throws IOException {
|
||||
for (int mode = 0; mode <=1; mode++) {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, false);
|
||||
Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
ParallelReader parallelReader1 = new ParallelReader(mode == 0);
|
||||
parallelReader1.add(reader1);
|
||||
IndexReader initReader2 = IndexReader.open(dir2, false);
|
||||
parallelReader1.add(initReader2);
|
||||
modifyIndex(1, dir2);
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
IndexReader parallelReader2 = IndexReader.openIfChanged(parallelReader1);
|
||||
assertNotNull(parallelReader2);
|
||||
assertNull(IndexReader.openIfChanged(parallelReader2));
|
||||
// index1 hasn't changed, so parallelReader2 should share reader1 now with parallelReader1
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
modifyIndex(0, dir1);
|
||||
modifyIndex(0, dir2);
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
modifyIndex(4, dir1);
|
||||
IndexReader reader3 = IndexReader.openIfChanged(reader2);
|
||||
assertNotNull(reader3);
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
assertRefCountEquals(1, reader2);
|
||||
|
||||
parallelReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
parallelReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
reader1.close();
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
parallelReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
parallelReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, true);
|
||||
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testNormsRefCounting() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, false);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
SegmentReader segmentReader1 = getOnlySegmentReader(reader1);
|
||||
IndexReader modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(0);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
modifier.setNorm(1, "field1", sim.encodeNormValue(50f));
|
||||
modifier.setNorm(1, "field2", sim.encodeNormValue(50f));
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader3 = IndexReader.openIfChanged(reader2);
|
||||
assertNotNull(reader3);
|
||||
SegmentReader segmentReader3 = getOnlySegmentReader(reader3);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(2);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader4 = IndexReader.openIfChanged(reader3);
|
||||
assertNotNull(reader4);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(3);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader5 = IndexReader.openIfChanged(reader3);
|
||||
assertNotNull(reader5);
|
||||
|
||||
// Now reader2-reader5 reference reader1. reader1 and reader2
|
||||
// share the same norms. reader3, reader4, reader5 also share norms.
|
||||
assertRefCountEquals(1, reader1);
|
||||
assertFalse(segmentReader1.normsClosed());
|
||||
|
||||
reader1.close();
|
||||
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader1.normsClosed());
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
// now the norms for field1 and field2 should be closed
|
||||
assertTrue(segmentReader1.normsClosed("field1"));
|
||||
assertTrue(segmentReader1.normsClosed("field2"));
|
||||
|
||||
// but the norms for field3 and field4 should still be open
|
||||
assertFalse(segmentReader1.normsClosed("field3"));
|
||||
assertFalse(segmentReader1.normsClosed("field4"));
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader3.normsClosed());
|
||||
reader5.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader3.normsClosed());
|
||||
reader4.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
// and now all norms that reader1 used should be closed
|
||||
assertTrue(segmentReader1.normsClosed());
|
||||
|
||||
// now that reader3, reader4 and reader5 are closed,
|
||||
// the norms that those three readers shared should be
|
||||
// closed as well
|
||||
assertTrue(segmentReader3.normsClosed());
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void performTestsWithExceptionInReopen(TestReopen test) throws Exception {
|
||||
IndexReader index1 = test.openReader();
|
||||
|
@@ -717,31 +453,20 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
final TestReopen test = new TestReopen() {
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
if (i % 3 == 0) {
|
||||
IndexReader modifier = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
modifier.setNorm(i, "field1", sim.encodeNormValue(50f));
|
||||
modifier.close();
|
||||
} else if (i % 3 == 1) {
|
||||
IndexReader modifier = IndexReader.open(dir, false);
|
||||
modifier.deleteDocument(i % modifier.maxDoc());
|
||||
modifier.close();
|
||||
} else {
|
||||
IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
modifier.addDocument(createDocument(n + i, 6));
|
||||
modifier.close();
|
||||
}
|
||||
IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
modifier.addDocument(createDocument(n + i, 6));
|
||||
modifier.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir, false);
|
||||
return IndexReader.open(dir);
|
||||
}
|
||||
};
|
||||
|
||||
final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
|
||||
IndexReader firstReader = IndexReader.open(dir, false);
|
||||
IndexReader firstReader = IndexReader.open(dir);
|
||||
IndexReader reader = firstReader;
|
||||
final Random rnd = random;
|
||||
|
||||
|
@@ -966,7 +691,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
|
||||
w.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
if (multiSegment) {
|
||||
assertTrue(r.getSequentialSubReaders().length > 1);
|
||||
} else {
|
||||
|
@@ -1009,21 +734,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
break;
|
||||
}
|
||||
case 1: {
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(4, "field1", sim.encodeNormValue(123f));
|
||||
reader.setNorm(44, "field2", sim.encodeNormValue(222f));
|
||||
reader.setNorm(44, "field4", sim.encodeNormValue(22f));
|
||||
reader.close();
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.forceMerge(1);
|
||||
w.close();
|
||||
break;
|
||||
}
|
||||
case 3: {
|
||||
case 2: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.addDocument(createDocument(101, 4));
|
||||
w.forceMerge(1);
|
||||
|
@@ -1032,15 +748,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
w.close();
|
||||
break;
|
||||
}
|
||||
case 4: {
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(5, "field1", sim.encodeNormValue(123f));
|
||||
reader.setNorm(55, "field2", sim.encodeNormValue(222f));
|
||||
reader.close();
|
||||
break;
|
||||
}
|
||||
case 5: {
|
||||
case 3: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.addDocument(createDocument(101, 4));
|
||||
w.close();
|
||||
|
@@ -1053,7 +761,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
assertEquals(0, reader.getRefCount());
|
||||
|
||||
if (checkNormsClosed && reader instanceof SegmentReader) {
|
||||
assertTrue(((SegmentReader) reader).normsClosed());
|
||||
// TODO: should we really assert something here? we check for open files and this is obsolete...
|
||||
// assertTrue(((SegmentReader) reader).normsClosed());
|
||||
}
|
||||
|
||||
if (checkSubReaders) {
|
||||
|
@@ -1103,94 +812,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
protected abstract void modifyIndex(int i) throws IOException;
|
||||
}
|
||||
|
||||
public void testCloseOrig() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
createIndex(random, dir, false);
|
||||
IndexReader r1 = IndexReader.open(dir, false);
|
||||
IndexReader r2 = IndexReader.open(dir, false);
|
||||
r2.deleteDocument(0);
|
||||
r2.close();
|
||||
|
||||
IndexReader r3 = IndexReader.openIfChanged(r1);
|
||||
assertNotNull(r3);
|
||||
assertTrue(r1 != r3);
|
||||
r1.close();
|
||||
try {
|
||||
r1.document(2);
|
||||
fail("did not hit exception");
|
||||
} catch (AlreadyClosedException ace) {
|
||||
// expected
|
||||
}
|
||||
r3.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeletes() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
createIndex(random, dir, false); // Create an index with a bunch of docs (1 segment)
|
||||
|
||||
modifyIndex(0, dir); // Get delete bitVector on 1st segment
|
||||
modifyIndex(5, dir); // Add a doc (2 segments)
|
||||
|
||||
IndexReader r1 = IndexReader.open(dir, false); // MSR
|
||||
|
||||
modifyIndex(5, dir); // Add another doc (3 segments)
|
||||
|
||||
IndexReader r2 = IndexReader.openIfChanged(r1); // MSR
|
||||
assertNotNull(r2);
|
||||
assertNull(IndexReader.openIfChanged(r2));
|
||||
assertTrue(r1 != r2);
|
||||
|
||||
SegmentReader sr1 = (SegmentReader) r1.getSequentialSubReaders()[0]; // Get SRs for the first segment from original
|
||||
SegmentReader sr2 = (SegmentReader) r2.getSequentialSubReaders()[0]; // and reopened IRs
|
||||
|
||||
// At this point they share the same BitVector
|
||||
assertTrue(sr1.liveDocs==sr2.liveDocs);
|
||||
|
||||
r2.deleteDocument(0);
|
||||
|
||||
// r1 should not see the delete
|
||||
final Bits r1LiveDocs = MultiFields.getLiveDocs(r1);
|
||||
assertFalse(r1LiveDocs != null && !r1LiveDocs.get(0));
|
||||
|
||||
// Now r2 should have made a private copy of deleted docs:
|
||||
assertTrue(sr1.liveDocs!=sr2.liveDocs);
|
||||
|
||||
r1.close();
|
||||
r2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeletes2() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
createIndex(random, dir, false);
|
||||
// Get delete bitVector
|
||||
modifyIndex(0, dir);
|
||||
IndexReader r1 = IndexReader.open(dir, false);
|
||||
|
||||
// Add doc:
|
||||
modifyIndex(5, dir);
|
||||
|
||||
IndexReader r2 = IndexReader.openIfChanged(r1);
|
||||
assertNotNull(r2);
|
||||
assertTrue(r1 != r2);
|
||||
|
||||
IndexReader[] rs2 = r2.getSequentialSubReaders();
|
||||
|
||||
SegmentReader sr1 = getOnlySegmentReader(r1);
|
||||
SegmentReader sr2 = (SegmentReader) rs2[0];
|
||||
|
||||
// At this point they share the same BitVector
|
||||
assertTrue(sr1.liveDocs==sr2.liveDocs);
|
||||
final BitVector liveDocs = sr1.liveDocs;
|
||||
r1.close();
|
||||
|
||||
r2.deleteDocument(0);
|
||||
assertTrue(liveDocs==sr2.liveDocs);
|
||||
r2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private static class KeepAllCommits implements IndexDeletionPolicy {
|
||||
public void onInit(List<? extends IndexCommit> commits) {
|
||||
}
|
||||
|
@@ -1223,7 +844,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
assertEquals(0, r.numDocs());
|
||||
|
||||
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
|
||||
|
@@ -1232,14 +853,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
assertNotNull(r2);
|
||||
assertTrue(r2 != r);
|
||||
|
||||
// Reader should be readOnly
|
||||
try {
|
||||
r2.deleteDocument(0);
|
||||
fail("no exception hit");
|
||||
} catch (UnsupportedOperationException uoe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
final Map<String,String> s = commit.getUserData();
|
||||
final int v;
|
||||
if (s.size() == 0) {
|
||||
|
@@ -1259,54 +872,4 @@ public class TestIndexReaderReopen extends LuceneTestCase {
|
|||
r.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
// LUCENE-1579: Make sure all SegmentReaders are new when
|
||||
// reopen switches readOnly
|
||||
public void testReopenChangeReadonly() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("number", "17", StringField.TYPE_UNSTORED));
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
assertTrue(r instanceof DirectoryReader);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number", false);
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
||||
// Reopen to readonly w/ no changes
|
||||
IndexReader r3 = IndexReader.openIfChanged(r, true);
|
||||
assertNotNull(r3);
|
||||
assertTrue(((DirectoryReader) r3).readOnly);
|
||||
r3.close();
|
||||
|
||||
// Add new segment
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Reopen reader1 --> reader2
|
||||
IndexReader r2 = IndexReader.openIfChanged(r, true);
|
||||
assertNotNull(r2);
|
||||
r.close();
|
||||
assertTrue(((DirectoryReader) r2).readOnly);
|
||||
IndexReader[] subs = r2.getSequentialSubReaders();
|
||||
final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number", false);
|
||||
r2.close();
|
||||
|
||||
assertTrue(((SegmentReader) subs[0]).readOnly);
|
||||
assertTrue(((SegmentReader) subs[1]).readOnly);
|
||||
assertTrue(ints == ints2);
|
||||
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -90,19 +90,19 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
|
||||
// add 100 documents
|
||||
for (i = 0; i < 100; i++) {
|
||||
addDoc(writer);
|
||||
addDocWithIndex(writer,i);
|
||||
}
|
||||
assertEquals(100, writer.maxDoc());
|
||||
writer.close();
|
||||
|
||||
// delete 40 documents
|
||||
reader = IndexReader.open(dir, false);
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
|
||||
for (i = 0; i < 40; i++) {
|
||||
reader.deleteDocument(i);
|
||||
writer.deleteDocuments(new Term("id", ""+i));
|
||||
}
|
||||
reader.close();
|
||||
writer.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(60, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@@ -115,7 +115,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// check that the index reader gives the same numbers.
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(60, reader.maxDoc());
|
||||
assertEquals(60, reader.numDocs());
|
||||
reader.close();
|
||||
|
@@ -182,7 +182,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// now open reader:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals("should be one document", reader.numDocs(), 1);
|
||||
|
||||
// now open index for create:
|
||||
|
@@ -192,7 +192,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
assertEquals("should be one document", reader.numDocs(), 1);
|
||||
IndexReader reader2 = IndexReader.open(dir, true);
|
||||
IndexReader reader2 = IndexReader.open(dir);
|
||||
assertEquals("should be one document", reader2.numDocs(), 1);
|
||||
reader.close();
|
||||
reader2.close();
|
||||
|
@@ -227,7 +227,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.maxDoc());
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
@@ -236,7 +236,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.maxDoc());
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
@@ -258,7 +258,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(100, reader.maxDoc());
|
||||
assertEquals(100, reader.numDocs());
|
||||
for(int j=0;j<100;j++) {
|
||||
|
@@ -452,7 +452,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals(300, hits.length);
|
||||
|
@@ -484,7 +484,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
|
||||
Term searchTerm = new Term("field", "aaa");
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals(10, hits.length);
|
||||
|
@ -507,14 +507,14 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
}
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals(27, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
reader.close();
|
||||
|
||||
dir.close();
|
||||
|
@ -541,7 +541,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(1, reader.maxDoc());
|
||||
assertEquals(1, reader.numDocs());
|
||||
Term t = new Term("field", "a");
|
||||
|
@ -586,7 +586,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
}
|
||||
writer.close();
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("did not get right number of hits", 100, hits.length);
|
||||
|
@ -643,7 +643,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
}
|
||||
writer.addDocument(new Document());
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(2, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -698,7 +698,6 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
|
||||
public void testVariableSchema() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
int delID = 0;
|
||||
for(int i=0;i<20;i++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: iter=" + i);
|
||||
|
@ -730,9 +729,6 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocument(delID++);
|
||||
reader.close();
|
||||
|
||||
if (0 == i % 4) {
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
|
@ -830,7 +826,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
t1.join();
|
||||
|
||||
// Make sure reader can read
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
reader.close();
|
||||
|
||||
// Reopen
|
||||
|
@ -858,7 +854,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
Term t = new Term("field", "x");
|
||||
assertEquals(1, reader.docFreq(t));
|
||||
reader.close();
|
||||
|
@ -885,7 +881,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
doc.add(newField("", "a b c", TextField.TYPE_UNSTORED));
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexReader subreader = getOnlySegmentReader(reader);
|
||||
TermsEnum te = subreader.fields().terms("").iterator(null);
|
||||
assertEquals(new BytesRef("a"), te.next());
|
||||
|
@ -906,7 +902,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
doc.add(newField("", "c", StringField.TYPE_UNSTORED));
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexReader subreader = getOnlySegmentReader(reader);
|
||||
TermsEnum te = subreader.fields().terms("").iterator(null);
|
||||
assertEquals(new BytesRef(""), te.next());
|
||||
|
@ -960,7 +956,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
assertTrue(w.afterWasCalled);
|
||||
w.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, true);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
assertEquals(0, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
|
@ -994,7 +990,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
w.commit();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
IndexSearcher s = new IndexSearcher(r);
|
||||
PhraseQuery pq = new PhraseQuery();
|
||||
pq.add(new Term("field", "a"));
|
||||
|
@ -1043,7 +1039,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, true);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
Document doc2 = ir.document(0);
|
||||
IndexableField f2 = doc2.getField("binary");
|
||||
b = f2.binaryValue().bytes;
|
||||
|
@ -1072,7 +1068,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
Terms tpv = r.getTermVectors(0).terms("field");
|
||||
TermsEnum termsEnum = tpv.iterator(null);
|
||||
assertNotNull(termsEnum.next());
|
||||
|
@ -1136,12 +1132,12 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
writer2.addDocument(doc);
|
||||
writer2.close();
|
||||
|
||||
IndexReader r1 = IndexReader.open(dir2, true);
|
||||
IndexReader r1 = IndexReader.open(dir2);
|
||||
IndexReader r2 = (IndexReader) r1.clone();
|
||||
writer.addIndexes(r1, r2);
|
||||
writer.close();
|
||||
|
||||
IndexReader r3 = IndexReader.open(dir, true);
|
||||
IndexReader r3 = IndexReader.open(dir);
|
||||
assertEquals(5, r3.numDocs());
|
||||
r3.close();
|
||||
|
||||
|
@ -1186,7 +1182,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
w.close();
|
||||
w = null;
|
||||
_TestUtil.checkIndex(dir);
|
||||
IndexReader.open(dir, true).close();
|
||||
IndexReader.open(dir).close();
|
||||
|
||||
// Strangely, if we interrupt a thread before
|
||||
// all classes are loaded, the class loader
|
||||
|
@ -1236,7 +1232,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
e.printStackTrace(System.out);
|
||||
}
|
||||
try {
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
//System.out.println("doc count=" + r.numDocs());
|
||||
r.close();
|
||||
} catch (Exception e) {
|
||||
|
@ -1322,7 +1318,7 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
w.forceMerge(1); // force segment merge.
|
||||
w.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, true);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
Document doc2 = ir.document(0);
|
||||
IndexableField f3 = doc2.getField("binary");
|
||||
b = f3.binaryValue().bytes;
|
||||
|
|
|
@ -51,21 +51,21 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 14, hits.length);
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for(int i=0;i<3;i++) {
|
||||
for(int j=0;j<11;j++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(r);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
|
@ -78,7 +78,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.close();
|
||||
assertFalse("reader should not be current now", reader.isCurrent());
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(r);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
|
||||
|
@ -105,7 +105,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("first number of hits", 14, hits.length);
|
||||
|
@ -120,7 +120,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
// Delete all docs:
|
||||
writer.deleteDocuments(searchTerm);
|
||||
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
|
@ -132,7 +132,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
|
||||
TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
|
||||
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("saw changes after writer.abort", 14, hits.length);
|
||||
|
@ -152,7 +152,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
for(int j=0;j<17;j++) {
|
||||
TestIndexWriter.addDoc(writer);
|
||||
}
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(r);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
|
||||
|
@ -161,7 +161,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
}
|
||||
|
||||
writer.close();
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(r);
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
assertEquals("didn't see changes after close", 218, hits.length);
|
||||
|
@ -243,7 +243,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
IndexReader.open(dir, true).close();
|
||||
IndexReader.open(dir).close();
|
||||
|
||||
long endDiskUsage = dir.getMaxUsedSizeInBytes();
|
||||
|
||||
|
@ -287,7 +287,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
|
||||
// Open a reader before closing (commiting) the writer:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
|
||||
// Reader should see index as multi-seg at this
|
||||
// point:
|
||||
|
@ -299,7 +299,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
|
||||
|
||||
// Open a reader after aborting writer:
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
|
||||
// Reader should still see index as multi-segment
|
||||
assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1);
|
||||
|
@ -318,7 +318,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
|
||||
|
||||
// Open a reader after aborting writer:
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
|
||||
// Reader should see index as one segment
|
||||
assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().length);
|
||||
|
@ -398,7 +398,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
writer.commit();
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader);
|
||||
|
@ -411,12 +411,12 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
TestIndexWriter.addDoc(writer);
|
||||
assertEquals(23, reader2.numDocs());
|
||||
reader2.close();
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
writer.commit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(40, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
|
@ -473,33 +473,10 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
|
||||
assertNotNull(commit);
|
||||
|
||||
IndexReader r = IndexReader.open(commit, true);
|
||||
assertEquals(2, r.numDocs());
|
||||
r.close();
|
||||
|
||||
// open "second", w/ writeable IndexReader & commit
|
||||
r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
|
||||
assertEquals(2, r.numDocs());
|
||||
r.deleteDocument(0);
|
||||
r.deleteDocument(1);
|
||||
commitData.put("tag", "fourth");
|
||||
r.commit(commitData);
|
||||
r.close();
|
||||
|
||||
// make sure "third" commit is still there
|
||||
commit = null;
|
||||
for(IndexCommit c : IndexReader.listCommits(dir)) {
|
||||
if (c.getUserData().get("tag").equals("third")) {
|
||||
commit = c;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertNotNull(commit);
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testNoCommits() throws Exception {
|
||||
public void testZeroCommits() throws Exception {
|
||||
// Tests that if we don't call commit(), the directory has 0 commits. This has
|
||||
// changed since LUCENE-2386, where before IW would always commit on a fresh
|
||||
// new index.
|
||||
|
@ -532,12 +509,12 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, true);
|
||||
IndexReader reader2 = IndexReader.open(dir);
|
||||
assertEquals(0, reader2.numDocs());
|
||||
|
||||
writer.commit();
|
||||
|
@ -555,18 +532,18 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
|
||||
assertEquals(23, reader3.numDocs());
|
||||
reader3.close();
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.commit();
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(40, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
|
@ -589,12 +566,12 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
for (int i = 0; i < 23; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
IndexReader reader2 = IndexReader.open(dir, true);
|
||||
IndexReader reader2 = IndexReader.open(dir);
|
||||
assertEquals(0, reader2.numDocs());
|
||||
|
||||
writer.rollback();
|
||||
|
@ -610,18 +587,18 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
for (int i = 0; i < 17; i++)
|
||||
TestIndexWriter.addDoc(writer);
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.prepareCommit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer.commit();
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(17, reader.numDocs());
|
||||
reader.close();
|
||||
writer.close();
|
||||
|
@ -637,7 +614,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -653,7 +630,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
|
||||
assertEquals(0, IndexReader.getCommitUserData(dir).size());
|
||||
|
||||
IndexReader r = IndexReader.open(dir, true);
|
||||
IndexReader r = IndexReader.open(dir);
|
||||
// commit(Map) never called for this index
|
||||
assertEquals(0, r.getCommitUserData().size());
|
||||
r.close();
|
||||
|
@ -668,7 +645,7 @@ public class TestIndexWriterCommit extends LuceneTestCase {
|
|||
|
||||
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
|
||||
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
assertEquals("test1", r.getCommitUserData().get("label"));
|
||||
r.close();
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
|
||||
modifier.commit();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -114,7 +114,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
|
||||
modifier.commit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(0, reader.numDocs());
|
||||
reader.close();
|
||||
modifier.close();
|
||||
|
@ -166,7 +166,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
assertEquals(0, modifier.getSegmentCount());
|
||||
modifier.commit();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(1, reader.numDocs());
|
||||
|
||||
int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
|
||||
|
@ -204,7 +204,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
|
||||
modifier.commit();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(5, reader.numDocs());
|
||||
modifier.close();
|
||||
reader.close();
|
||||
|
@ -226,7 +226,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
}
|
||||
modifier.commit();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -236,7 +236,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
|
||||
modifier.commit();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(5, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -246,7 +246,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
}
|
||||
modifier.deleteDocuments(terms);
|
||||
modifier.commit();
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(2, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -269,7 +269,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
}
|
||||
modifier.commit();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -280,7 +280,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
modifier.deleteAll();
|
||||
|
||||
// Delete all shouldn't be on disk yet
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -292,7 +292,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
modifier.commit();
|
||||
|
||||
// Validate there are no docs left
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(2, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -317,7 +317,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
|
||||
addDoc(modifier, ++id, value);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -329,7 +329,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
modifier.close();
|
||||
|
||||
// Validate that the docs are still there
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -372,7 +372,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
modifier.close();
|
||||
|
||||
// Validate that the docs are still there
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
assertEquals(7, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
|
@ -567,7 +567,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
|
|||
// changed (transactional semantics):
|
||||
IndexReader newReader = null;
|
||||
try {
|
||||
newReader = IndexReader.open(dir, true);
|
||||
newReader = IndexReader.open(dir);
|
||||
}
|
||||
catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
|
|
|
@ -254,7 +254,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
}
|
||||
|
||||
// Confirm that when doc hits exception partway through tokenization, it's deleted:
|
||||
IndexReader r2 = IndexReader.open(dir, true);
|
||||
IndexReader r2 = IndexReader.open(dir);
|
||||
final int count = r2.docFreq(new Term("content4", "aaa"));
|
||||
final int count2 = r2.docFreq(new Term("content4", "ddd"));
|
||||
assertEquals(count, count2);
|
||||
|
@ -300,7 +300,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
}
|
||||
|
||||
// Confirm that when doc hits exception partway through tokenization, it's deleted:
|
||||
IndexReader r2 = IndexReader.open(dir, true);
|
||||
IndexReader r2 = IndexReader.open(dir);
|
||||
final int count = r2.docFreq(new Term("content4", "aaa"));
|
||||
final int count2 = r2.docFreq(new Term("content4", "ddd"));
|
||||
assertEquals(count, count2);
|
||||
|
@ -494,7 +494,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
final Term t = new Term("content", "aa");
|
||||
assertEquals(3, reader.docFreq(t));
|
||||
|
||||
|
@ -576,7 +576,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
}
|
||||
assertTrue(hitError);
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(198, reader.docFreq(new Term("content", "aa")));
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -631,7 +631,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
if (VERBOSE) {
|
||||
System.out.println("TEST: open reader");
|
||||
}
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
if (i == 0) {
|
||||
int expected = 5;
|
||||
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
|
||||
|
@ -660,7 +660,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
int expected = 19+(1-i)*2;
|
||||
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
|
||||
assertEquals(expected, reader.maxDoc());
|
||||
|
@ -746,7 +746,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
writer.close();
|
||||
}
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
|
||||
assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
|
||||
assertEquals(expected, reader.maxDoc());
|
||||
|
@ -774,7 +774,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
expected += 17-NUM_THREAD*NUM_ITER;
|
||||
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
|
||||
assertEquals(expected, reader.maxDoc());
|
||||
|
@ -845,7 +845,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
failure.clearDoFail();
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(23, reader.numDocs());
|
||||
reader.close();
|
||||
dir.close();
|
||||
|
@ -1058,7 +1058,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
|
||||
IndexReader reader = null;
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail("segmentInfos failed to retry fallback to correct segments_N file");
|
||||
|
@ -1105,7 +1105,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
|
||||
IndexReader reader = null;
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
fail("reader did not hit IOException on opening a corrupt index");
|
||||
} catch (Exception e) {
|
||||
}
|
||||
|
@ -1154,7 +1154,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
|
||||
IndexReader reader = null;
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
fail("reader did not hit IOException on opening a corrupt index");
|
||||
} catch (Exception e) {
|
||||
}
|
||||
|
@ -1205,7 +1205,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
|
|||
|
||||
IndexReader reader = null;
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
} catch (Exception e) {
|
||||
fail("reader failed to open on a crashed index");
|
||||
}
|
||||
|
|
|
@ -186,7 +186,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase {
|
|||
|
||||
if (0 == pass) {
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals(1, reader.getSequentialSubReaders().length);
|
||||
reader.close();
|
||||
} else {
|
||||
|
@ -196,7 +196,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertTrue(reader.getSequentialSubReaders().length > 1);
|
||||
reader.close();
|
||||
|
||||
|
|
|
@ -192,9 +192,14 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
|
|||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
reader.deleteDocuments(new Term("content", "aaa"));
|
||||
reader.close();
|
||||
// delete some docs without merging
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)
|
||||
);
|
||||
writer.deleteDocuments(new Term("content", "aaa"));
|
||||
writer.close();
|
||||
|
||||
ldmp = new LogDocMergePolicy();
|
||||
ldmp.setMergeFactor(5);
|
||||
|
|
|
@ -78,7 +78,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
private boolean verifyIndex(Directory directory, int startAt) throws IOException
|
||||
{
|
||||
boolean fail = false;
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
|
||||
int max = reader.maxDoc();
|
||||
for (int i = 0; i < max; i++)
|
||||
|
@ -126,8 +126,6 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
IndexWriterConfig.DISABLE_AUTO_FLUSH));
|
||||
Document document = new Document();
|
||||
|
||||
document = new Document();
|
||||
|
||||
FieldType customType = new FieldType();
|
||||
customType.setStored(true);
|
||||
|
||||
|
@ -137,19 +135,31 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
customType1.setStoreTermVectorPositions(true);
|
||||
customType1.setStoreTermVectorOffsets(true);
|
||||
|
||||
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
|
||||
document.add(idField);
|
||||
Field storedField = newField("stored", "stored", customType);
|
||||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector", customType1);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<10;i++)
|
||||
for(int i=0;i<10;i++) {
|
||||
idField.setValue("" + i);
|
||||
writer.addDocument(document);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
assertEquals(10, ir.maxDoc());
|
||||
assertEquals(10, ir.numDocs());
|
||||
ir.deleteDocument(0);
|
||||
ir.deleteDocument(7);
|
||||
ir.close();
|
||||
|
||||
IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(dir, dontMergeConfig);
|
||||
writer.deleteDocuments(new Term("id", "0"));
|
||||
writer.deleteDocuments(new Term("id", "7"));
|
||||
writer.close();
|
||||
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(8, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
|
@ -159,7 +169,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
writer.forceMergeDeletes();
|
||||
assertEquals(8, writer.numDocs());
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(8, ir.maxDoc());
|
||||
assertEquals(8, ir.numDocs());
|
||||
ir.close();
|
||||
|
@ -192,15 +202,28 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector", customType1);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<98;i++)
|
||||
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
|
||||
document.add(idField);
|
||||
for(int i=0;i<98;i++) {
|
||||
idField.setValue("" + i);
|
||||
writer.addDocument(document);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
assertEquals(98, ir.maxDoc());
|
||||
assertEquals(98, ir.numDocs());
|
||||
for(int i=0;i<98;i+=2)
|
||||
ir.deleteDocument(i);
|
||||
ir.close();
|
||||
|
||||
IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(dir, dontMergeConfig);
|
||||
for(int i=0;i<98;i+=2) {
|
||||
writer.deleteDocuments(new Term("id", "" + i));
|
||||
}
|
||||
writer.close();
|
||||
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
|
@ -212,7 +235,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
assertEquals(49, writer.numDocs());
|
||||
writer.forceMergeDeletes();
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(49, ir.maxDoc());
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
@ -245,15 +268,27 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
document.add(storedField);
|
||||
Field termVectorField = newField("termVector", "termVector", customType1);
|
||||
document.add(termVectorField);
|
||||
for(int i=0;i<98;i++)
|
||||
Field idField = newField("id", "", StringField.TYPE_UNSTORED);
|
||||
document.add(idField);
|
||||
for(int i=0;i<98;i++) {
|
||||
idField.setValue("" + i);
|
||||
writer.addDocument(document);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
assertEquals(98, ir.maxDoc());
|
||||
assertEquals(98, ir.numDocs());
|
||||
for(int i=0;i<98;i+=2)
|
||||
ir.deleteDocument(i);
|
||||
ir.close();
|
||||
|
||||
IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
|
||||
.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
|
||||
writer = new IndexWriter(dir, dontMergeConfig);
|
||||
for(int i=0;i<98;i+=2) {
|
||||
writer.deleteDocuments(new Term("id", "" + i));
|
||||
}
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
||||
|
@ -264,7 +299,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
|
|||
);
|
||||
writer.forceMergeDeletes(false);
|
||||
writer.close();
|
||||
ir = IndexReader.open(dir, true);
|
||||
ir = IndexReader.open(dir);
|
||||
assertEquals(49, ir.maxDoc());
|
||||
assertEquals(49, ir.numDocs());
|
||||
ir.close();
|
||||
|
|
|
@ -113,7 +113,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
assertNoUnreferencedFiles(dir, "after disk full during addDocument");
|
||||
|
||||
// Make sure reader can open the index:
|
||||
IndexReader.open(dir, true).close();
|
||||
IndexReader.open(dir).close();
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
@ -189,7 +189,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
|
||||
// Make sure starting index seems to be working properly:
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
IndexReader reader = IndexReader.open(startDir, true);
|
||||
IndexReader reader = IndexReader.open(startDir);
|
||||
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
|
||||
|
||||
IndexSearcher searcher = newSearcher(reader);
|
||||
|
@ -306,7 +306,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
} else if (1 == method) {
|
||||
IndexReader readers[] = new IndexReader[dirs.length];
|
||||
for(int i=0;i<dirs.length;i++) {
|
||||
readers[i] = IndexReader.open(dirs[i], true);
|
||||
readers[i] = IndexReader.open(dirs[i]);
|
||||
}
|
||||
try {
|
||||
writer.addIndexes(readers);
|
||||
|
@ -355,7 +355,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
// failed, we see either all docs or no docs added
|
||||
// (transactional semantics):
|
||||
try {
|
||||
reader = IndexReader.open(dir, true);
|
||||
reader = IndexReader.open(dir);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace(System.out);
|
||||
fail(testName + ": exception when creating IndexReader: " + e);
|
||||
|
|
|
@ -159,7 +159,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
writer.close();
|
||||
assertTrue(r2.isCurrent());
|
||||
|
||||
IndexReader r3 = IndexReader.open(dir1, true);
|
||||
IndexReader r3 = IndexReader.open(dir1);
|
||||
assertTrue(r3.isCurrent());
|
||||
assertTrue(r2.isCurrent());
|
||||
assertEquals(0, count(new Term("id", id10), r3));
|
||||
|
@ -384,7 +384,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
|
||||
_TestUtil.checkIndex(mainDir);
|
||||
|
||||
IndexReader reader = IndexReader.open(mainDir, true);
|
||||
IndexReader reader = IndexReader.open(mainDir);
|
||||
assertEquals(addDirThreads.count.intValue(), reader.numDocs());
|
||||
//assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.numThreads
|
||||
// * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
|
||||
|
@ -420,7 +420,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
|
||||
readers = new IndexReader[numDirs];
|
||||
for (int i = 0; i < numDirs; i++)
|
||||
readers[i] = IndexReader.open(addDir, false);
|
||||
readers[i] = IndexReader.open(addDir);
|
||||
}
|
||||
|
||||
void joinThreads() {
|
||||
|
@ -892,7 +892,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
|
|||
w.forceMergeDeletes();
|
||||
w.close();
|
||||
r.close();
|
||||
r = IndexReader.open(dir, true);
|
||||
r = IndexReader.open(dir);
|
||||
assertEquals(1, r.numDocs());
|
||||
assertFalse(r.hasDeletions());
|
||||
r.close();
|
||||
|
|
|
@ -260,7 +260,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
IndexReader ir = IndexReader.open(dir, true);
|
||||
IndexReader ir = IndexReader.open(dir);
|
||||
Document doc2 = ir.document(0);
|
||||
for(int i=0;i<count;i++) {
|
||||
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
|
||||
|
|
|
@ -209,7 +209,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
|
|||
}
|
||||
|
||||
// Quick test to make sure index is not corrupt:
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
DocsEnum tdocs = _TestUtil.docs(random, reader,
|
||||
"field",
|
||||
new BytesRef("aaa"),
|
||||
|
@ -276,7 +276,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
|
|||
}
|
||||
|
||||
if (success) {
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
final Bits delDocs = MultiFields.getLiveDocs(reader);
|
||||
for(int j=0;j<reader.maxDoc();j++) {
|
||||
if (delDocs == null || !delDocs.get(j)) {
|
||||
|
@ -447,7 +447,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
|
|||
assertFalse("Failed due to: " + thread1.failure, thread1.failed);
|
||||
assertFalse("Failed due to: " + thread2.failure, thread2.failed);
|
||||
// now verify that we have two documents in the index
|
||||
IndexReader reader = IndexReader.open(dir, true);
|
||||
IndexReader reader = IndexReader.open(dir);
|
||||
assertEquals("IndexReader should have one document per thread running", 2,
|
||||
reader.numDocs());
|
||||
|
||||
|
|
|
@ -105,7 +105,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
|
|||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory, false));
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(directory));
|
||||
|
||||
this.searcher = newSearcher(reader);
|
||||
}
|
||||
|
@ -153,7 +153,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
|
|||
}
|
||||
|
||||
writer.close();
|
||||
IndexReader reader = IndexReader.open(directory, true);
|
||||
IndexReader reader = IndexReader.open(directory);
|
||||
|
||||
DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
|
||||
MultiFields.getLiveDocs(reader),
|
||||
|
|
|
@ -38,8 +38,8 @@ public class TestMultiReader extends TestDirectoryReader {
|
|||
IndexReader reader;
|
||||
|
||||
sis.read(dir);
|
||||
SegmentReader reader1 = SegmentReader.get(false, sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader2 = SegmentReader.get(false, sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader1 = SegmentReader.get(sis.info(0), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
SegmentReader reader2 = SegmentReader.get(sis.info(1), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
|
||||
readers[0] = reader1;
|
||||
readers[1] = reader2;
|
||||
assertTrue(reader1 != null);