mirror of https://github.com/apache/lucene.git
LUCENE-3606: Die, IR.setNorm(), die
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3606@1210031 13f79535-47bb-0310-9956-ffa450edef68
parent 0122c4f140
commit 498cba2fac
@@ -1112,11 +1112,6 @@ public class MemoryIndex {
      }
      return norms;
    }

    @Override
    protected void doSetNorm(int doc, String fieldName, byte value) {
      throw new UnsupportedOperationException();
    }

    @Override
    public int numDocs() {
@@ -1,159 +0,0 @@
package org.apache.lucene.index;

/**
 * Copyright 2006 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.io.File;
import java.util.Date;
import java.util.List;
import java.util.ArrayList;

import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.SimilarityProvider;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ReaderUtil;

/**
 * Given a directory and a list of fields, updates the fieldNorms in place for every document.
 *
 * If Similarity class is specified, uses its computeNorm method to set norms.
 * If -n command line argument is used, removed field norms, as if
 * {@link org.apache.lucene.document.FieldType#setOmitNorms(boolean)} was used.
 *
 * <p>
 * NOTE: This will overwrite any length normalization or field/document boosts.
 * </p>
 *
 */
public class FieldNormModifier {

  /**
   * Command Line Execution method.
   *
   * <pre>
   * Usage: FieldNormModifier /path/index <package.SimilarityClassName | -n> field1 field2 ...
   * </pre>
   */
  public static void main(String[] args) throws IOException {
    if (args.length < 3) {
      System.err.println("Usage: FieldNormModifier <index> <package.SimilarityClassName | -d> <field1> [field2] ...");
      System.exit(1);
    }

    SimilarityProvider s = null;

    if (args[1].equals("-d"))
      args[1] = DefaultSimilarity.class.getName();

    try {
      s = Class.forName(args[1]).asSubclass(SimilarityProvider.class).newInstance();
    } catch (Exception e) {
      System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
      e.printStackTrace(System.err);
      System.exit(1);
    }

    Directory d = FSDirectory.open(new File(args[0]));
    FieldNormModifier fnm = new FieldNormModifier(d, s);

    for (int i = 2; i < args.length; i++) {
      System.out.print("Updating field: " + args[i] + " " + (new Date()).toString() + " ... ");
      fnm.reSetNorms(args[i]);
      System.out.println(new Date().toString());
    }

    d.close();
  }


  private Directory dir;
  private SimilarityProvider sim;

  /**
   * Constructor for code that wishes to use this class programmatically
   * If Similarity is null, kill the field norms.
   *
   * @param d the Directory to modify
   * @param s the Similarity to use (can be null)
   */
  public FieldNormModifier(Directory d, SimilarityProvider s) {
    dir = d;
    sim = s;
  }

  /**
   * Resets the norms for the specified field.
   *
   * <p>
   * Opens a new IndexReader on the Directory given to this instance,
   * modifies the norms (either using the Similarity given to this instance, or by using fake norms,
   * and closes the IndexReader.
   * </p>
   *
   * @param field the field whose norms should be reset
   */
  public void reSetNorms(String field) throws IOException {
    Similarity fieldSim = sim.get(field);
    IndexReader reader = null;
    try {
      reader = IndexReader.open(dir, false);

      final List<IndexReader> subReaders = new ArrayList<IndexReader>();
      ReaderUtil.gatherSubReaders(subReaders, reader);

      final FieldInvertState invertState = new FieldInvertState();
      for(IndexReader subReader : subReaders) {
        final Bits liveDocs = subReader.getLiveDocs();

        int[] termCounts = new int[subReader.maxDoc()];
        Fields fields = subReader.fields();
        if (fields != null) {
          Terms terms = fields.terms(field);
          if (terms != null) {
            TermsEnum termsEnum = terms.iterator(null);
            DocsEnum docs = null;
            while(termsEnum.next() != null) {
              docs = termsEnum.docs(liveDocs, docs);
              while(true) {
                int docID = docs.nextDoc();
                if (docID != docs.NO_MORE_DOCS) {
                  termCounts[docID] += docs.freq();
                } else {
                  break;
                }
              }
            }
          }
        }

        invertState.setBoost(1.0f);
        for (int d = 0; d < termCounts.length; d++) {
          if (liveDocs == null || liveDocs.get(d)) {
            invertState.setLength(termCounts[d]);
            subReader.setNorm(d, field, fieldSim.computeNorm(invertState));
          }
        }
      }

    } finally {
      if (null != reader) reader.close();
    }
  }
}
@ -1,259 +0,0 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests changing of field norms with a custom similarity and with fake norms.
|
||||
*/
|
||||
public class TestFieldNormModifier extends LuceneTestCase {
|
||||
public static int NUM_DOCS = 5;
|
||||
|
||||
public Directory store;
|
||||
|
||||
/** inverts the normal notion of lengthNorm */
|
||||
public static SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
store = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
Document d = new Document();
|
||||
|
||||
d.add(newField("field", "word", TextField.TYPE_STORED));
|
||||
|
||||
d.add(newField("nonorm", "word", StringField.TYPE_STORED));
|
||||
d.add(newField("untokfield", "20061212 20071212", TextField.TYPE_STORED));
|
||||
|
||||
for (int j = 1; j <= i; j++) {
|
||||
d.add(newField("field", "crap", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "more words", StringField.TYPE_STORED));
|
||||
}
|
||||
writer.addDocument(d);
|
||||
}
|
||||
writer.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
store.close();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
public void testMissingField() throws Exception {
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nobodyherebutuschickens");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
public void testFieldWithNoNorm() throws Exception {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] norms = MultiNorms.norms(r, "nonorm");
|
||||
|
||||
// sanity check, norms should all be 1
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nonorm");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// nothing should have changed
|
||||
r = IndexReader.open(store, false);
|
||||
|
||||
norms = MultiNorms.norms(r, "nonorm");
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
}
|
||||
|
||||
|
||||
public void testGoodCases() throws Exception {
|
||||
|
||||
IndexReader reader = IndexReader.open(store);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should put docs with shorter length first
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = Float.MAX_VALUE;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
|
||||
assertTrue(msg, scores[i] <= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("field");
|
||||
|
||||
// new norm (with default similarity) should put longer docs first
|
||||
reader = IndexReader.open(store);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = 0.0f;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " >= " + lastScore;
|
||||
assertTrue(msg, scores[i] >= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
}
|
||||
|
||||
public void testNormKiller() throws IOException {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] oldNorms = MultiNorms.norms(r, "untokfield");
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("untokfield");
|
||||
|
||||
r = IndexReader.open(store, false);
|
||||
byte[] newNorms = MultiNorms.norms(r, "untokfield");
|
||||
r.close();
|
||||
assertFalse(Arrays.equals(oldNorms, newNorms));
|
||||
|
||||
|
||||
// verify that we still get documents in the same order as originally
|
||||
IndexReader reader = IndexReader.open(store);
|
||||
IndexSearcher searcher = new IndexSearcher(reader);
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should return the same score for all documents for this query
|
||||
searcher.search(new TermQuery(new Term("untokfield", "20061212")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = scores[0];
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", " + scores[i] + " == " + lastScore;
|
||||
assertTrue(msg, scores[i] == lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,222 +0,0 @@
|
|||
package org.apache.lucene.misc;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.FieldInvertState;
|
||||
import org.apache.lucene.index.FieldNormModifier;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.MultiNorms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests changing the norms after changing the simularity
|
||||
*/
|
||||
public class TestLengthNormModifier extends LuceneTestCase {
|
||||
public static int NUM_DOCS = 5;
|
||||
|
||||
public Directory store;
|
||||
|
||||
/** inverts the normal notion of lengthNorm */
|
||||
public static SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
store = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("field", "word", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "word", StringField.TYPE_STORED));
|
||||
|
||||
for (int j = 1; j <= i; j++) {
|
||||
d.add(newField("field", "crap", TextField.TYPE_STORED));
|
||||
d.add(newField("nonorm", "more words", StringField.TYPE_STORED));
|
||||
}
|
||||
writer.addDocument(d);
|
||||
}
|
||||
writer.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
store.close();
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
public void testMissingField() throws Exception {
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nobodyherebutuschickens");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
|
||||
public void testFieldWithNoNorm() throws Exception {
|
||||
|
||||
IndexReader r = IndexReader.open(store, false);
|
||||
byte[] norms = MultiNorms.norms(r, "nonorm");
|
||||
|
||||
// sanity check, norms should all be 1
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
try {
|
||||
fnm.reSetNorms("nonorm");
|
||||
} catch (IllegalStateException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// nothing should have changed
|
||||
r = IndexReader.open(store, false);
|
||||
|
||||
norms = MultiNorms.norms(r, "nonorm");
|
||||
assertTrue("Whoops we have norms?", !r.hasNorms("nonorm"));
|
||||
assertNull(norms);
|
||||
|
||||
r.close();
|
||||
|
||||
}
|
||||
|
||||
|
||||
public void testGoodCases() throws Exception {
|
||||
|
||||
IndexSearcher searcher;
|
||||
final float[] scores = new float[NUM_DOCS];
|
||||
float lastScore = 0.0f;
|
||||
|
||||
// default similarity should put docs with shorter length first
|
||||
IndexReader reader = IndexReader.open(store, false);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = Float.MAX_VALUE;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", "+scores[i]+" <= "+lastScore;
|
||||
assertTrue(msg, scores[i] <= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
// override the norms to be inverted
|
||||
SimilarityProvider s = new DefaultSimilarityProvider() {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
FieldNormModifier fnm = new FieldNormModifier(store, s);
|
||||
fnm.reSetNorms("field");
|
||||
|
||||
// new norm (with default similarity) should put longer docs first
|
||||
reader = IndexReader.open(store, false);
|
||||
searcher = new IndexSearcher(reader);
|
||||
searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
|
||||
private int docBase = 0;
|
||||
private Scorer scorer;
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
scores[doc + docBase] = scorer.score();
|
||||
}
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
@Override
|
||||
public void setScorer(Scorer scorer) throws IOException {
|
||||
this.scorer = scorer;
|
||||
}
|
||||
@Override
|
||||
public boolean acceptsDocsOutOfOrder() {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
lastScore = 0.0f;
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
String msg = "i=" + i + ", "+scores[i]+" >= "+lastScore;
|
||||
assertTrue(msg, scores[i] >= lastScore);
|
||||
//System.out.println(msg);
|
||||
lastScore = scores[i];
|
||||
}
|
||||
|
||||
}
|
||||
}
|
|
@@ -592,13 +592,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
  }

  @Override
  protected void doSetNorm(int n, String field, byte value)
      throws CorruptIndexException, IOException {
    int i = readerIndex(n); // find segment num
    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
  }

  @Override
  public int docFreq(Term t) throws IOException {
    ensureOpen();
@@ -647,7 +640,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
    if (segmentInfos != null) {
      ensureOpen();
      if (stale)
        throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
        throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete operations");

      if (writeLock == null) {
        Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
@@ -661,14 +654,14 @@ class DirectoryReader extends IndexReader implements Cloneable {
          stale = true;
          this.writeLock.release();
          this.writeLock = null;
          throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
          throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete operations");
        }
      }
    }
  }

  /**
   * Commit changes resulting from delete, undeleteAll, or setNorm operations
   * Commit changes resulting from delete, undeleteAll operations
   * <p/>
   * If an exception is hit, then either no changes or all changes will have been committed to the index (transactional
   * semantics).
@@ -277,7 +277,7 @@ public class FilterIndexReader extends IndexReader {

  /**
   * <p>Construct a FilterIndexReader based on the specified base reader.
   * Directory locking for delete, undeleteAll, and setNorm operations is
   * Directory locking for delete, undeleteAll operations is
   * left to the base reader.</p>
   * <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
   * @param in specified base reader.
@@ -346,11 +346,6 @@ public class FilterIndexReader extends IndexReader {
    return in.norms(f);
  }

  @Override
  protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
    in.setNorm(d, f, b);
  }

  @Override
  public int docFreq(Term t) throws IOException {
    ensureOpen();
@@ -910,39 +910,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
   */
  public abstract byte[] norms(String field) throws IOException;

  /** Expert: Resets the normalization factor for the named field of the named
   * document. By default, the norm represents the product of the field's {@link
   * org.apache.lucene.document.Field#setBoost(float) boost} and its
   * length normalization}. Thus, to preserve the length normalization
   * values when resetting this, one should base the new value upon the old.
   *
   * <b>NOTE:</b> If this field does not index norms, then
   * this method throws {@link IllegalStateException}.
   *
   * @see #norms(String)
   * @see Similarity#computeNorm(FieldInvertState)
   * @see org.apache.lucene.search.similarities.DefaultSimilarity#decodeNormValue(byte)
   * @throws StaleReaderException if the index has changed
   *  since this reader was opened
   * @throws CorruptIndexException if the index is corrupt
   * @throws LockObtainFailedException if another writer
   *  has this index open (<code>write.lock</code> could not
   *  be obtained)
   * @throws IOException if there is a low-level IO error
   * @throws IllegalStateException if the field does not index norms
   */
  public synchronized void setNorm(int doc, String field, byte value)
      throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    ensureOpen();
    acquireWriteLock();
    hasChanges = true;
    doSetNorm(doc, field, value);
  }

  /** Implements setNorm in subclass.*/
  protected abstract void doSetNorm(int doc, String field, byte value)
      throws CorruptIndexException, IOException;

  /**
   * Returns {@link Fields} for this reader.
   * This method may return null if the reader has no
@@ -1229,8 +1196,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
  }

  /**
   * Commit changes resulting from delete, undeleteAll, or
   * setNorm operations
   * Commit changes resulting from delete, undeleteAll operations
   *
   * If an exception is hit, then either no changes or all
   * changes will have been committed to the index
@@ -1242,8 +1208,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
  }

  /**
   * Commit changes resulting from delete, undeleteAll, or
   * setNorm operations
   * Commit changes resulting from delete, undeleteAll operations
   *
   * If an exception is hit, then either no changes or all
   * changes will have been committed to the index
@@ -1415,7 +1380,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * that has no sub readers).
   * <p>
   * NOTE: You should not try using sub-readers returned by
   * this method to make any changes (setNorm, deleteDocument,
   * this method to make any changes (deleteDocument,
   * etc.). While this might succeed for one composite reader
   * (like MultiReader), it will most likely lead to index
   * corruption for other readers (like DirectoryReader obtained
@@ -1443,7 +1408,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * for performance reasons.
   * <p>
   * NOTE: You should not try using sub-readers returned by this method to make
   * any changes (setNorm, deleteDocument, etc.). While this might succeed for
   * any changes (deleteDocument, etc.). While this might succeed for
   * one composite reader (like MultiReader), it will most likely lead to index
   * corruption for other readers (like DirectoryReader obtained through
   * {@link #open}. Use the top-level context's reader directly.
@@ -41,7 +41,7 @@ public class MultiReader extends IndexReader implements Cloneable {

  /**
   * <p>Construct a MultiReader aggregating the named set of (sub)readers.
   * Directory locking for delete, undeleteAll, and setNorm operations is
   * Directory locking for delete, undeleteAll operations is
   * left to the subreaders. </p>
   * <p>Note that all subreaders are closed if this Multireader is closed.</p>
   * @param subReaders set of (sub)readers
@@ -52,7 +52,7 @@ public class MultiReader extends IndexReader implements Cloneable {

  /**
   * <p>Construct a MultiReader aggregating the named set of (sub)readers.
   * Directory locking for delete, undeleteAll, and setNorm operations is
   * Directory locking for delete, undeleteAll operations is
   * left to the subreaders. </p>
   * @param closeSubReaders indicates whether the subreaders should be closed
   * when this MultiReader is closed
@@ -278,13 +278,6 @@ public class MultiReader extends IndexReader implements Cloneable {
    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
  }

  @Override
  protected void doSetNorm(int n, String field, byte value)
      throws CorruptIndexException, IOException {
    int i = readerIndex(n); // find segment num
    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
  }

  @Override
  public int docFreq(Term t) throws IOException {
    ensureOpen();
@@ -402,18 +402,6 @@ public class ParallelReader extends IndexReader {
    return bytes;
  }

  @Override
  protected void doSetNorm(int n, String field, byte value)
      throws CorruptIndexException, IOException {
    IndexReader reader = fieldToReader.get(field);
    if (reader!=null) {
      synchronized(normsCache) {
        normsCache.remove(field);
      }
      reader.doSetNorm(n, field, value);
    }
  }

  @Override
  public int docFreq(Term term) throws IOException {
    ensureOpen();
@@ -51,9 +51,6 @@ final class SegmentNorms implements Cloneable {
  private AtomicInteger bytesRef;
  private byte[] bytes;
  private int number;

  boolean dirty;
  boolean rollbackDirty;

  private final SegmentReader owner;

@@ -154,27 +151,6 @@ final class SegmentNorms implements Cloneable {
  AtomicInteger bytesRef() {
    return bytesRef;
  }

  // Called if we intend to change a norm value. We make a
  // private copy of bytes if it's shared with others:
  public synchronized byte[] copyOnWrite() throws IOException {
    assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
    bytes();
    assert bytes != null;
    assert bytesRef != null;
    if (bytesRef.get() > 1) {
      // I cannot be the origNorm for another norm
      // instance if I'm being changed. Ie, only the
      // "head Norm" can be changed:
      assert refCount == 1;
      final AtomicInteger oldRef = bytesRef;
      bytes = owner.cloneNormBytes(bytes);
      bytesRef = new AtomicInteger(1);
      oldRef.decrementAndGet();
    }
    dirty = true;
    return bytes;
  }

  // Returns a copy of this Norm instance that shares
  // IndexInput & bytes with the original one
@@ -211,35 +187,4 @@ final class SegmentNorms implements Cloneable {

    return clone;
  }

  // Flush all pending changes to the next generation
  // separate norms file.
  public void reWrite(SegmentInfo si) throws IOException {
    assert refCount > 0 && (origNorm == null || origNorm.refCount > 0): "refCount=" + refCount + " origNorm=" + origNorm;

    // NOTE: norms are re-written in regular directory, not cfs
    si.advanceNormGen(this.number);
    final String normFileName = si.getNormFileName(this.number);
    IndexOutput out = owner.directory().createOutput(normFileName, new IOContext(new FlushInfo(si.docCount, 0)));
    boolean success = false;
    try {
      try {
        out.writeBytes(SegmentNorms.NORMS_HEADER, 0, SegmentNorms.NORMS_HEADER.length);
        out.writeBytes(bytes, owner.maxDoc());
      } finally {
        out.close();
      }
      success = true;
    } finally {
      if (!success) {
        try {
          owner.directory().deleteFile(normFileName);
        } catch (Throwable t) {
          // suppress this so we keep throwing the
          // original exception
        }
      }
    }
    this.dirty = false;
  }
}
@@ -53,7 +53,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
  volatile BitVector liveDocs;
  AtomicInteger liveDocsRef = null;
  private boolean liveDocsDirty = false;
  private boolean normsDirty = false;

  // TODO: we should move this tracking into SegmentInfo;
  // this way SegmentInfo.toString shows pending deletes
@@ -61,7 +60,6 @@ public class SegmentReader extends IndexReader implements Cloneable {

  private boolean rollbackHasChanges = false;
  private boolean rollbackDeletedDocsDirty = false;
  private boolean rollbackNormsDirty = false;
  private SegmentInfo rollbackSegmentInfo;
  private int rollbackPendingDeleteCount;

@@ -256,7 +254,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
    if (!openReadOnly && hasChanges) {
      // My pending changes transfer to the new reader
      clone.liveDocsDirty = liveDocsDirty;
      clone.normsDirty = normsDirty;
      clone.hasChanges = hasChanges;
      hasChanges = false;
    }
@@ -355,16 +352,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
      assert pendingDeleteCount == 0;
    }

    if (normsDirty) { // re-write norms
      si.initNormGen();
      for (final SegmentNorms norm : norms.values()) {
        if (norm.dirty) {
          norm.reWrite(si);
        }
      }
    }
    liveDocsDirty = false;
    normsDirty = false;
    hasChanges = false;
  }

@@ -558,19 +546,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
    return norm.bytes();
  }

  @Override
  protected void doSetNorm(int doc, String field, byte value)
      throws IOException {
    SegmentNorms norm = norms.get(field);
    if (norm == null) {
      // field does not store norms
      throw new IllegalStateException("Cannot setNorm for field " + field + ": norms were omitted");
    }

    normsDirty = true;
    norm.copyOnWrite()[doc] = value; // set the value
  }

  private void openNorms(Directory cfsDir, IOContext context) throws IOException {
    long nextNormSeek = SegmentNorms.NORMS_HEADER.length; //skip header (header unused for now)
    int maxDoc = maxDoc();
@@ -723,22 +698,14 @@ public class SegmentReader extends IndexReader implements Cloneable {
    rollbackSegmentInfo = (SegmentInfo) si.clone();
    rollbackHasChanges = hasChanges;
    rollbackDeletedDocsDirty = liveDocsDirty;
    rollbackNormsDirty = normsDirty;
    rollbackPendingDeleteCount = pendingDeleteCount;
    for (SegmentNorms norm : norms.values()) {
      norm.rollbackDirty = norm.dirty;
    }
  }

  void rollbackCommit() {
    si.reset(rollbackSegmentInfo);
    hasChanges = rollbackHasChanges;
    liveDocsDirty = rollbackDeletedDocsDirty;
    normsDirty = rollbackNormsDirty;
    pendingDeleteCount = rollbackPendingDeleteCount;
    for (SegmentNorms norm : norms.values()) {
      norm.dirty = norm.rollbackDirty;
    }
  }

  /** Returns the directory this index resides in. */
@@ -109,13 +109,4 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
    ensureOpen();
    return readerContext;
  }

  @Override
  protected void doSetNorm(int n, String field, byte value)
      throws CorruptIndexException, IOException {
    synchronized(normsCache) {
      normsCache.remove(field);
    }
    in.doSetNorm(n, field, value);
  }
}
@ -43,7 +43,6 @@ import org.apache.lucene.search.IndexSearcher;
|
|||
import org.apache.lucene.search.NumericRangeQuery;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.store.CompoundFileDirectory;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
@ -378,28 +377,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// make sure we can do delete & setNorm against this segment:
|
||||
reader = IndexReader.open(dir, false);
|
||||
searcher = newSearcher(reader);
|
||||
Term searchTerm = new Term("id", "6");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("wrong delete count", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
searcher.close();
|
||||
|
||||
// make sure they "took":
|
||||
reader = IndexReader.open(dir, true);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 43, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 43, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// fully merge
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
|
@ -408,10 +385,10 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 43, hits.length);
|
||||
assertEquals("wrong number of hits", 44, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
doTestHits(hits, 43, searcher.getIndexReader());
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 44, searcher.getIndexReader());
|
||||
assertEquals("wrong first document", "21", d.get("id"));
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
|
@ -432,26 +409,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// make sure we can do a delete & setNorm against this segment:
|
||||
reader = IndexReader.open(dir, false);
|
||||
Term searchTerm = new Term("id", "6");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("wrong delete count", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(22, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
|
||||
// make sure they "took":
|
||||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 33, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 33, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
// fully merge
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
|
||||
writer.forceMerge(1);
|
||||
|
@ -460,10 +417,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
reader = IndexReader.open(dir);
|
||||
searcher = new IndexSearcher(reader);
|
||||
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
|
||||
assertEquals("wrong number of hits", 33, hits.length);
|
||||
d = searcher.doc(hits[0].doc);
|
||||
assertEquals("wrong first document", "22", d.get("id"));
|
||||
doTestHits(hits, 33, searcher.getIndexReader());
|
||||
assertEquals("wrong number of hits", 34, hits.length);
|
||||
doTestHits(hits, 34, searcher.getIndexReader());
|
||||
searcher.close();
|
||||
reader.close();
|
||||
|
||||
|
@ -510,9 +465,6 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
|
||||
// Set one norm so we get a .s0 file:
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
}
|
||||
|
||||
|
@ -554,34 +506,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
|
||||
// Set one norm so we get a .s0 file:
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
|
||||
// The numbering of fields can vary depending on which
|
||||
// JRE is in use. On some JREs we see content bound to
|
||||
// field 0; on others, field 1. So, here we have to
|
||||
// figure out which field number corresponds to
|
||||
// "content", and then set our expected file names below
|
||||
// accordingly:
|
||||
CompoundFileDirectory cfsReader = new CompoundFileDirectory(dir, "_0.cfs", newIOContext(random), false);
|
||||
FieldInfosReader infosReader = Codec.getDefault().fieldInfosFormat().getFieldInfosReader();
|
||||
FieldInfos fieldInfos = infosReader.read(cfsReader, "_0", IOContext.READONCE);
|
||||
int contentFieldIndex = -1;
|
||||
for (FieldInfo fi : fieldInfos) {
|
||||
if (fi.name.equals("content")) {
|
||||
contentFieldIndex = fi.number;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cfsReader.close();
|
||||
assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
|
||||
|
||||
// Now verify file names:
|
||||
String[] expected = new String[] {"_0.cfs", "_0.cfe",
|
||||
"_0_1.del",
|
||||
"_0_1.s" + contentFieldIndex,
|
||||
"segments_2",
|
||||
"segments.gen"};
|
||||
|
||||
|
|
|
@ -654,8 +654,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, policy, false);
|
||||
reader.deleteDocument(3*i+1);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(4*i+1, "content", sim.encodeNormValue(2.0F));
|
||||
IndexSearcher searcher = newSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(16*(1+i), hits.length);
|
||||
|
@ -783,8 +781,6 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
writer.close();
|
||||
IndexReader reader = IndexReader.open(dir, policy, false);
|
||||
reader.deleteDocument(3);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(5, "content", sim.encodeNormValue(2.0F));
|
||||
IndexSearcher searcher = newSearcher(reader);
|
||||
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
assertEquals(16, hits.length);
|
||||
|
|
|
@ -72,9 +72,6 @@ public class TestIndexFileDeleter extends LuceneTestCase {
|
|||
Term searchTerm = new Term("id", "7");
|
||||
int delCount = reader.deleteDocuments(searchTerm);
|
||||
assertEquals("didn't delete the right number of documents", 1, delCount);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
// Set one norm so we get a .s0 file:
|
||||
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
|
||||
reader.close();
|
||||
|
||||
// Now, artificially create an extra .del file & extra
|
||||
|
@ -87,47 +84,6 @@ public class TestIndexFileDeleter extends LuceneTestCase {
|
|||
}
|
||||
*/
|
||||
|
||||
// The numbering of fields can vary depending on which
|
||||
// JRE is in use. On some JREs we see content bound to
|
||||
// field 0; on others, field 1. So, here we have to
|
||||
// figure out which field number corresponds to
|
||||
// "content", and then set our expected file names below
|
||||
// accordingly:
|
||||
CompoundFileDirectory cfsReader = new CompoundFileDirectory(dir, "_2.cfs", newIOContext(random), false);
|
||||
FieldInfosReader infosReader = Codec.getDefault().fieldInfosFormat().getFieldInfosReader();
|
||||
FieldInfos fieldInfos = infosReader.read(cfsReader, "2", IOContext.READONCE);
|
||||
int contentFieldIndex = -1;
|
||||
for (FieldInfo fi : fieldInfos) {
|
||||
if (fi.name.equals("content")) {
|
||||
contentFieldIndex = fi.number;
|
||||
break;
|
||||
}
|
||||
}
|
||||
cfsReader.close();
|
||||
assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
|
||||
|
||||
String normSuffix = "s" + contentFieldIndex;
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that actually has a separate norms file
|
||||
// already:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_2_2." + normSuffix);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that actually has a separate norms file
|
||||
// already, using the "not compound file" extension:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_2_2.f" + contentFieldIndex);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that does not have a separate norms
|
||||
// file already:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_1_1." + normSuffix);
|
||||
|
||||
// Create a bogus separate norms file for a
|
||||
// segment/field that does not have a separate norms
|
||||
// file already using the "not compound file" extension:
|
||||
copyFile(dir, "_2_1." + normSuffix, "_1_1.f" + contentFieldIndex);
|
||||
|
||||
// Create a bogus separate del file for a
|
||||
// segment that already has a separate del file:
|
||||
copyFile(dir, "_0_1.del", "_0_2.del");
|
||||
|
|
|
@ -417,14 +417,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// expected
|
||||
}
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
|
||||
fail("setNorm after close failed to throw IOException");
|
||||
} catch (AlreadyClosedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
reader.undeleteAll();
|
||||
fail("undeleteAll after close failed to throw IOException");
|
||||
|
@ -458,14 +450,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// expected
|
||||
}
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
|
||||
fail("setNorm should have hit LockObtainFailedException");
|
||||
} catch (LockObtainFailedException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
reader.undeleteAll();
|
||||
fail("undeleteAll should have hit LockObtainFailedException");
|
||||
|
@ -477,81 +461,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
// Make sure you can set norms & commit even if a reader
|
||||
// is open against the index:
|
||||
public void testWritingNorms() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
// now open reader & set norm for doc 0
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
|
||||
// we should be holding the write lock now:
|
||||
assertTrue("locked", IndexWriter.isLocked(dir));
|
||||
|
||||
reader.commit();
|
||||
|
||||
// we should not be holding the write lock now:
|
||||
assertTrue("not locked", !IndexWriter.isLocked(dir));
|
||||
|
||||
// open a 2nd reader:
|
||||
IndexReader reader2 = IndexReader.open(dir, false);
|
||||
|
||||
// set norm again for doc 0
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(3.0f));
|
||||
assertTrue("locked", IndexWriter.isLocked(dir));
|
||||
|
||||
reader.close();
|
||||
|
||||
// we should not be holding the write lock now:
|
||||
assertTrue("not locked", !IndexWriter.isLocked(dir));
|
||||
|
||||
reader2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
|
||||
// Make sure you can set norms & commit, and there are
|
||||
// no extra norms files left:
|
||||
public void testWritingNormsNoReader() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = null;
|
||||
IndexReader reader = null;
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 documents with term : aaa
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
// now open reader & set norm for doc 0 (writes to
|
||||
// _0_1.s0)
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
|
||||
// now open reader again & set norm for doc 0 (writes to _0_2.s0)
|
||||
reader = IndexReader.open(dir, false);
|
||||
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
|
||||
reader.close();
|
||||
assertFalse("failed to remove first generation norms file on writing second generation",
|
||||
dir.fileExists("_0_1.s0"));
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/* ??? public void testOpenEmptyDirectory() throws IOException{
|
||||
String dirName = "test.empty";
|
||||
File fileDirName = new File(dirName);
|
||||
|
@ -714,40 +623,6 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir.close();
|
||||
}
|
||||
|
||||
public void testExceptionReleaseWriteLockJIRA768() throws IOException {
|
||||
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
addDoc(writer, "aaa");
|
||||
writer.close();
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
try {
|
||||
reader.deleteDocument(1);
|
||||
fail("did not hit exception when deleting an invalid doc number");
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
// expected
|
||||
}
|
||||
reader.close();
|
||||
if (IndexWriter.isLocked(dir)) {
|
||||
fail("write lock is still held after close");
|
||||
}
|
||||
|
||||
reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
reader.setNorm(1, "content", sim.encodeNormValue(2.0f));
|
||||
fail("did not hit exception when calling setNorm on an invalid doc number");
|
||||
} catch (ArrayIndexOutOfBoundsException e) {
|
||||
// expected
|
||||
}
|
||||
reader.close();
|
||||
if (IndexWriter.isLocked(dir)) {
|
||||
fail("write lock is still held after close");
|
||||
}
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testOpenReaderAfterDelete() throws IOException {
|
||||
File dirFile = _TestUtil.getTempDir("deletetest");
|
||||
Directory dir = newFSDirectory(dirFile);
|
||||
|
|
|
@ -103,27 +103,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// create single-segment index, open non-readOnly
|
||||
// SegmentReader, add docs, reopen to multireader, then do
|
||||
// delete
|
||||
public void testReopenSegmentReaderToMultiReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
|
||||
TestIndexReaderReopen.modifyIndex(5, dir1);
|
||||
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
assertTrue(reader1 != reader2);
|
||||
|
||||
assertTrue(deleteWorked(1, reader2));
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
// open non-readOnly reader1, clone to readOnly reader2
|
||||
public void testCloneWriteableToReadOnly() throws Exception {
|
||||
|
@ -244,73 +223,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testParallelReader() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir2, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = IndexReader.open(dir2, false);
|
||||
|
||||
ParallelReader pr1 = new ParallelReader();
|
||||
pr1.add(r1);
|
||||
pr1.add(r2);
|
||||
|
||||
performDefaultTests(pr1);
|
||||
pr1.close();
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* 1. Get a norm from the original reader 2. Clone the original reader 3.
|
||||
* Delete a document and set the norm of the cloned reader 4. Verify the norms
|
||||
* are not the same on each reader 5. Verify the doc deleted is only in the
|
||||
* cloned reader 6. Try to delete a document in the original reader, an
|
||||
* exception should be thrown
|
||||
*
|
||||
* @param r1 IndexReader to perform tests on
|
||||
* @throws Exception
|
||||
*/
|
||||
private void performDefaultTests(IndexReader r1) throws Exception {
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
float norm1 = sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
|
||||
|
||||
IndexReader pr1Clone = (IndexReader) r1.clone();
|
||||
pr1Clone.deleteDocument(10);
|
||||
pr1Clone.setNorm(4, "field1", sim.encodeNormValue(0.5f));
|
||||
assertTrue(sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1);
|
||||
assertTrue(sim.decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1);
|
||||
|
||||
final Bits liveDocs = MultiFields.getLiveDocs(r1);
|
||||
assertTrue(liveDocs == null || liveDocs.get(10));
|
||||
assertFalse(MultiFields.getLiveDocs(pr1Clone).get(10));
|
||||
|
||||
// try to update the original reader, which should throw an exception
|
||||
try {
|
||||
r1.deleteDocument(11);
|
||||
fail("Tried to delete doc 11 and an exception should have been thrown");
|
||||
} catch (Exception exception) {
|
||||
// expected
|
||||
}
|
||||
pr1Clone.close();
|
||||
}
|
||||
|
||||
public void testMixedReaders() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir2, true);
|
||||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = IndexReader.open(dir2, false);
|
||||
|
||||
MultiReader multiReader = new MultiReader(r1, r2);
|
||||
performDefaultTests(multiReader);
|
||||
multiReader.close();
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
public void testSegmentReaderUndeleteall() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
@@ -329,17 +241,12 @@ public class TestIndexReaderClone extends LuceneTestCase {
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
origSegmentReader.deleteDocument(1);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
origSegmentReader.setNorm(4, "field1", sim.encodeNormValue(0.5f));
|
||||
|
||||
SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
|
||||
.clone();
|
||||
assertDelDocsRefCountEquals(2, origSegmentReader);
|
||||
origSegmentReader.close();
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
// check the norm refs
|
||||
SegmentNorms norm = clonedSegmentReader.norms.get("field1");
|
||||
assertEquals(1, norm.bytesRef().get());
|
||||
clonedSegmentReader.close();
|
||||
dir1.close();
|
||||
}
|
||||
@@ -427,28 +334,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
dir1.close();
|
||||
}
|
||||
|
||||
// LUCENE-1648
|
||||
public void testCloneWithSetNorm() throws Throwable {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader orig = IndexReader.open(dir1, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
orig.setNorm(1, "field1", sim.encodeNormValue(17.0f));
|
||||
final byte encoded = sim.encodeNormValue(17.0f);
|
||||
assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]);
|
||||
|
||||
// the cloned segmentreader should have 2 references, 1 to itself, and 1 to
|
||||
// the original segmentreader
|
||||
IndexReader clonedReader = (IndexReader) orig.clone();
|
||||
orig.close();
|
||||
clonedReader.close();
|
||||
|
||||
IndexReader r = IndexReader.open(dir1, false);
|
||||
assertEquals(encoded, MultiNorms.norms(r, "field1")[1]);
|
||||
r.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
|
||||
int doc) {
|
||||
assertEquals(reader.getLiveDocs().get(doc), reader2.getLiveDocs().get(doc));
|
||||
@@ -1,362 +0,0 @@
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Random;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.search.similarities.SimilarityProvider;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests cloning IndexReader norms
|
||||
*/
|
||||
public class TestIndexReaderCloneNorms extends LuceneTestCase {
|
||||
|
||||
private class SimilarityProviderOne extends DefaultSimilarityProvider {
|
||||
@Override
|
||||
public Similarity get(String field) {
|
||||
return new DefaultSimilarity() {
|
||||
@Override
|
||||
public byte computeNorm(FieldInvertState state) {
|
||||
// disable length norm
|
||||
return encodeNormValue(state.getBoost());
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static final int NUM_FIELDS = 10;
|
||||
|
||||
private SimilarityProvider similarityProviderOne;
|
||||
|
||||
private Analyzer anlzr;
|
||||
|
||||
private int numDocNorms;
|
||||
|
||||
private ArrayList<Float> norms;
|
||||
|
||||
private ArrayList<Float> modifiedNorms;
|
||||
|
||||
private float lastNorm = 0;
|
||||
|
||||
private float normDelta = (float) 0.001;
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
similarityProviderOne = new SimilarityProviderOne();
|
||||
anlzr = new MockAnalyzer(random);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that norms values are preserved as the index is maintained. Including
|
||||
* separate norms. Including merging indexes with separate norms. Including
|
||||
* full merge.
|
||||
*/
|
||||
public void testNorms() throws IOException {
|
||||
// test with a single index: index1
|
||||
Directory dir1 = newDirectory();
|
||||
IndexWriter.unlock(dir1);
|
||||
|
||||
norms = new ArrayList<Float>();
|
||||
modifiedNorms = new ArrayList<Float>();
|
||||
|
||||
createIndex(random, dir1);
|
||||
doTestNorms(random, dir1);
|
||||
|
||||
// test with a single index: index2
|
||||
ArrayList<Float> norms1 = norms;
|
||||
ArrayList<Float> modifiedNorms1 = modifiedNorms;
|
||||
int numDocNorms1 = numDocNorms;
|
||||
|
||||
norms = new ArrayList<Float>();
|
||||
modifiedNorms = new ArrayList<Float>();
|
||||
numDocNorms = 0;
|
||||
|
||||
Directory dir2 = newDirectory();
|
||||
|
||||
createIndex(random, dir2);
|
||||
doTestNorms(random, dir2);
|
||||
|
||||
// add index1 and index2 to a third index: index3
|
||||
Directory dir3 = newDirectory();
|
||||
|
||||
createIndex(random, dir3);
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now addIndexes/full merge");
|
||||
}
|
||||
IndexWriter iw = new IndexWriter(
|
||||
dir3,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(5).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
iw.addIndexes(dir1, dir2);
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
|
||||
norms1.addAll(norms);
|
||||
norms = norms1;
|
||||
modifiedNorms1.addAll(modifiedNorms);
|
||||
modifiedNorms = modifiedNorms1;
|
||||
numDocNorms += numDocNorms1;
|
||||
|
||||
// test with index3
|
||||
verifyIndex(dir3);
|
||||
doTestNorms(random, dir3);
|
||||
|
||||
// now with full merge
|
||||
iw = new IndexWriter(
|
||||
dir3,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(5).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
verifyIndex(dir3);
|
||||
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
dir3.close();
|
||||
}
|
||||
|
||||
// try cloning and reopening the norms
|
||||
private void doTestNorms(Random random, Directory dir) throws IOException {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now doTestNorms");
|
||||
}
|
||||
addDocs(random, dir, 12, true);
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
verifyIndex(ir);
|
||||
modifyNormsForF1(ir);
|
||||
IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir, false);//ir.clone();
|
||||
verifyIndex(irc);
|
||||
|
||||
modifyNormsForF1(irc);
|
||||
|
||||
IndexReader irc3 = (IndexReader) irc.clone();
|
||||
verifyIndex(irc3);
|
||||
modifyNormsForF1(irc3);
|
||||
verifyIndex(irc3);
|
||||
irc3.flush();
|
||||
|
||||
ir.close();
|
||||
irc.close();
|
||||
irc3.close();
|
||||
}
|
||||
|
||||
public void testNormsClose() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader reader1 = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
reader1.norms("field1");
|
||||
SegmentNorms r1norm = reader1.norms.get("field1");
|
||||
AtomicInteger r1BytesRef = r1norm.bytesRef();
|
||||
SegmentReader reader2 = (SegmentReader)reader1.clone();
|
||||
assertEquals(2, r1norm.bytesRef().get());
|
||||
reader1.close();
|
||||
assertEquals(1, r1BytesRef.get());
|
||||
reader2.norms("field1");
|
||||
reader2.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
public void testNormsRefCounting() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
|
||||
IndexReader reader2C = (IndexReader) reader1.clone();
|
||||
SegmentReader segmentReader2C = getOnlySegmentReader(reader2C);
|
||||
segmentReader2C.norms("field1"); // load the norms for the field
|
||||
SegmentNorms reader2CNorm = segmentReader2C.norms.get("field1");
|
||||
assertTrue("reader2CNorm.bytesRef()=" + reader2CNorm.bytesRef(), reader2CNorm.bytesRef().get() == 2);
|
||||
|
||||
|
||||
|
||||
IndexReader reader3C = (IndexReader) reader2C.clone();
|
||||
SegmentReader segmentReader3C = getOnlySegmentReader(reader3C);
|
||||
SegmentNorms reader3CCNorm = segmentReader3C.norms.get("field1");
|
||||
assertEquals(3, reader3CCNorm.bytesRef().get());
|
||||
|
||||
// edit a norm and the refcount should be 1
|
||||
IndexReader reader4C = (IndexReader) reader3C.clone();
|
||||
SegmentReader segmentReader4C = getOnlySegmentReader(reader4C);
|
||||
assertEquals(4, reader3CCNorm.bytesRef().get());
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader4C.setNorm(5, "field1", sim.encodeNormValue(0.33f));
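// setNorm makes reader4C copy the shared norms (copy-on-write): the shared bytes drop back
// to a refcount of 3 while reader4C's private copy is at 1, as the assertions below verify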
|
||||
|
||||
// reader3C has been cloned (by reader4C), so attempting to update it should throw an exception
|
||||
try {
|
||||
reader3C.setNorm(1, "field1", sim.encodeNormValue(0.99f));
|
||||
fail("did not hit expected exception");
|
||||
} catch (Exception ex) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// norm values should be different
|
||||
assertTrue(sim.decodeNormValue(segmentReader3C.norms("field1")[5])
|
||||
!= sim.decodeNormValue(segmentReader4C.norms("field1")[5]));
|
||||
SegmentNorms reader4CCNorm = segmentReader4C.norms.get("field1");
|
||||
assertEquals(3, reader3CCNorm.bytesRef().get());
|
||||
assertEquals(1, reader4CCNorm.bytesRef().get());
|
||||
|
||||
IndexReader reader5C = (IndexReader) reader4C.clone();
|
||||
SegmentReader segmentReader5C = getOnlySegmentReader(reader5C);
|
||||
SegmentNorms reader5CCNorm = segmentReader5C.norms.get("field1");
|
||||
reader5C.setNorm(5, "field1", sim.encodeNormValue(0.7f));
|
||||
assertEquals(1, reader5CCNorm.bytesRef().get());
|
||||
|
||||
reader5C.close();
|
||||
reader4C.close();
|
||||
reader3C.close();
|
||||
reader2C.close();
|
||||
reader1.close();
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void createIndex(Random random, Directory dir) throws IOException {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: createIndex");
|
||||
}
|
||||
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
|
||||
.setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
|
||||
lmp.setMergeFactor(3);
|
||||
lmp.setUseCompoundFile(true);
|
||||
iw.close();
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: done createIndex");
|
||||
}
|
||||
}
|
||||
|
||||
private void modifyNormsForF1(IndexReader ir) throws IOException {
|
||||
int n = ir.maxDoc();
|
||||
// System.out.println("modifyNormsForF1 maxDoc: "+n);
|
||||
for (int i = 0; i < n; i += 3) { // modify for every third doc
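// swap the expected norm values of docs i and k, then apply the same swap to the index
// via setNorm below, so verifyIndex() still sees matching values afterwards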
|
||||
int k = (i * 3) % modifiedNorms.size();
|
||||
float origNorm = modifiedNorms.get(i).floatValue();
|
||||
float newNorm = modifiedNorms.get(k).floatValue();
|
||||
// System.out.println("Modifying: for "+i+" from "+origNorm+" to
|
||||
// "+newNorm);
|
||||
// System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
|
||||
modifiedNorms.set(i, Float.valueOf(newNorm));
|
||||
modifiedNorms.set(k, Float.valueOf(origNorm));
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
ir.setNorm(i, "f" + 1, sim.encodeNormValue(newNorm));
|
||||
ir.setNorm(k, "f" + 1, sim.encodeNormValue(origNorm));
|
||||
// System.out.println("setNorm i: "+i);
|
||||
// break;
|
||||
}
|
||||
// ir.close();
|
||||
}
|
||||
|
||||
private void verifyIndex(Directory dir) throws IOException {
|
||||
IndexReader ir = IndexReader.open(dir, false);
|
||||
verifyIndex(ir);
|
||||
ir.close();
|
||||
}
|
||||
|
||||
private void verifyIndex(IndexReader ir) throws IOException {
|
||||
for (int i = 0; i < NUM_FIELDS; i++) {
|
||||
String field = "f" + i;
|
||||
byte b[] = MultiNorms.norms(ir, field);
|
||||
assertEquals("number of norms mismatches", numDocNorms, b.length);
|
||||
ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
|
||||
for (int j = 0; j < b.length; j++) {
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
float norm = sim.decodeNormValue(b[j]);
|
||||
float norm1 = storedNorms.get(j).floatValue();
|
||||
assertEquals("stored norm value of " + field + " for doc " + j + " is "
|
||||
+ norm + " - a mismatch!", norm, norm1, 0.000001);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void addDocs(Random random, Directory dir, int ndocs, boolean compound)
|
||||
throws IOException {
|
||||
IndexWriterConfig conf = newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
|
||||
.setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy());
|
||||
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
|
||||
lmp.setMergeFactor(3);
|
||||
lmp.setUseCompoundFile(compound);
|
||||
IndexWriter iw = new IndexWriter(dir, conf);
|
||||
for (int i = 0; i < ndocs; i++) {
|
||||
iw.addDocument(newDoc());
|
||||
}
|
||||
iw.close();
|
||||
}
|
||||
|
||||
// create the next document
|
||||
private Document newDoc() {
|
||||
Document d = new Document();
|
||||
float boost = nextNorm("anyfield"); // in this test the same similarity is used for all fields so it does not matter what field is passed
|
||||
|
||||
FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
|
||||
customType.setTokenized(false);
|
||||
for (int i = 0; i < 10; i++) {
|
||||
Field f = newField("f" + i, "v" + i, customType);
|
||||
f.setBoost(boost);
|
||||
d.add(f);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
||||
// return unique norm values that are unchanged by encoding/decoding
|
||||
private float nextNorm(String fname) {
|
||||
float norm = lastNorm + normDelta;
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
do {
|
||||
float norm1 = sim.decodeNormValue(
|
||||
sim.encodeNormValue(norm));
|
||||
if (norm1 > lastNorm) {
|
||||
// System.out.println(norm1+" > "+lastNorm);
|
||||
norm = norm1;
|
||||
break;
|
||||
}
|
||||
norm += normDelta;
|
||||
} while (true);
|
||||
norms.add(numDocNorms, Float.valueOf(norm));
|
||||
modifiedNorms.add(numDocNorms, Float.valueOf(norm));
|
||||
// System.out.println("creating norm("+numDocNorms+"): "+norm);
|
||||
numDocNorms++;
|
||||
lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct
|
||||
// values can be stored in a single byte
|
||||
return norm;
|
||||
}
|
||||
}
|
@@ -1,229 +0,0 @@
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.similarities.DefaultSimilarity;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReaderOnDiskFull extends LuceneTestCase {
|
||||
/**
|
||||
* Make sure if reader tries to commit but hits disk
|
||||
* full that reader remains consistent and usable.
|
||||
*/
|
||||
public void testDiskFull() throws IOException {
|
||||
|
||||
Term searchTerm = new Term("content", "aaa");
|
||||
int START_COUNT = 157;
|
||||
int END_COUNT = 144;
|
||||
|
||||
// First build up a starting index:
|
||||
MockDirectoryWrapper startDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: create initial index");
|
||||
}
|
||||
for(int i=0;i<157;i++) {
|
||||
Document d = new Document();
|
||||
d.add(newField("id", Integer.toString(i), StringField.TYPE_STORED));
|
||||
d.add(newField("content", "aaa " + i, TextField.TYPE_UNSTORED));
|
||||
writer.addDocument(d);
|
||||
if (0==i%10)
|
||||
writer.commit();
|
||||
}
|
||||
writer.close();
|
||||
|
||||
{
|
||||
IndexReader r = IndexReader.open(startDir);
|
||||
IndexSearcher searcher = newSearcher(r);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail("exception when init searching: " + e);
|
||||
}
|
||||
searcher.close();
|
||||
r.close();
|
||||
}
|
||||
|
||||
long diskUsage = startDir.getRecomputedActualSizeInBytes();
|
||||
long diskFree = diskUsage+_TestUtil.nextInt(random, 50, 200);
|
||||
|
||||
IOException err = null;
|
||||
|
||||
boolean done = false;
|
||||
boolean gotExc = false;
|
||||
|
||||
// Iterate w/ ever increasing free disk space:
|
||||
while(!done) {
|
||||
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random)));
|
||||
|
||||
// If IndexReader hits disk full, it can write to
|
||||
// the same files again.
|
||||
dir.setPreventDoubleWrite(false);
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
|
||||
// For each disk size, first try to commit against
|
||||
// dir that will hit random IOExceptions & disk
|
||||
// full; after, give it infinite disk space & turn
|
||||
// off random IOExceptions & retry w/ same reader:
|
||||
boolean success = false;
|
||||
|
||||
for(int x=0;x<2;x++) {
|
||||
|
||||
double rate = 0.05;
|
||||
double diskRatio = ((double) diskFree)/diskUsage;
|
||||
long thisDiskFree;
|
||||
String testName;
|
||||
|
||||
if (0 == x) {
|
||||
thisDiskFree = diskFree;
|
||||
if (diskRatio >= 2.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 4.0) {
|
||||
rate /= 2;
|
||||
}
|
||||
if (diskRatio >= 6.0) {
|
||||
rate = 0.0;
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: " + diskFree + " bytes");
|
||||
}
|
||||
testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
|
||||
} else {
|
||||
thisDiskFree = 0;
|
||||
rate = 0.0;
|
||||
if (VERBOSE) {
|
||||
System.out.println("\ncycle: same writer: unlimited disk space");
|
||||
}
|
||||
testName = "reader re-use after disk full";
|
||||
}
|
||||
|
||||
dir.setMaxSizeInBytes(thisDiskFree);
|
||||
dir.setRandomIOExceptionRate(rate);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
try {
|
||||
if (0 == x) {
|
||||
int docId = 12;
|
||||
for(int i=0;i<13;i++) {
|
||||
reader.deleteDocument(docId);
|
||||
reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
|
||||
docId += 12;
|
||||
}
|
||||
}
|
||||
reader.close();
|
||||
success = true;
|
||||
if (0 == x) {
|
||||
done = true;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
if (VERBOSE) {
|
||||
System.out.println(" hit IOException: " + e);
|
||||
e.printStackTrace(System.out);
|
||||
}
|
||||
err = e;
|
||||
gotExc = true;
|
||||
if (1 == x) {
|
||||
e.printStackTrace();
|
||||
fail(testName + " hit IOException after disk space was freed up");
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, verify index is not corrupt, and, if
|
||||
// we succeeded, we see all docs changed, and if
|
||||
// we failed, we see either all docs or no docs
|
||||
// changed (transactional semantics):
|
||||
IndexReader newReader = null;
|
||||
try {
|
||||
newReader = IndexReader.open(dir, false);
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
|
||||
}
|
||||
/*
|
||||
int result = newReader.docFreq(searchTerm);
|
||||
if (success) {
|
||||
if (result != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result != START_COUNT && result != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
IndexSearcher searcher = newSearcher(newReader);
|
||||
ScoreDoc[] hits = null;
|
||||
try {
|
||||
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
|
||||
} catch (IOException e) {
|
||||
e.printStackTrace();
|
||||
fail(testName + ": exception when searching: " + e);
|
||||
}
|
||||
int result2 = hits.length;
|
||||
if (success) {
|
||||
if (result2 != END_COUNT) {
|
||||
fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
|
||||
}
|
||||
} else {
|
||||
// On hitting exception we still may have added
|
||||
// all docs:
|
||||
if (result2 != START_COUNT && result2 != END_COUNT) {
|
||||
err.printStackTrace();
|
||||
fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
searcher.close();
|
||||
newReader.close();
|
||||
|
||||
if (result2 == END_COUNT) {
|
||||
if (!gotExc)
|
||||
fail("never hit disk full");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
dir.close();
|
||||
|
||||
// Try again with more bytes of free space:
|
||||
diskFree += TEST_NIGHTLY ? _TestUtil.nextInt(random, 5, 20) : _TestUtil.nextInt(random, 50, 200);
|
||||
}
|
||||
|
||||
startDir.close();
|
||||
}
|
||||
}
|
@@ -48,99 +48,6 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReaderReopen extends LuceneTestCase {
|
||||
|
||||
public void testReopen() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
|
||||
createIndex(random, dir1, false);
|
||||
performDefaultTests(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir1);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir1, false);
|
||||
}
|
||||
|
||||
});
|
||||
dir1.close();
|
||||
|
||||
final Directory dir2 = newDirectory();
|
||||
|
||||
createIndex(random, dir2, true);
|
||||
performDefaultTests(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir2);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir2, false);
|
||||
}
|
||||
|
||||
});
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
public void testParallelReaderReopen() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
performDefaultTests(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir1);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir2);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir1, false));
|
||||
pr.add(IndexReader.open(dir2, false));
|
||||
return pr;
|
||||
}
|
||||
|
||||
});
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
|
||||
final Directory dir3 = newDirectory();
|
||||
createIndex(random, dir3, true);
|
||||
final Directory dir4 = newDirectory();
|
||||
createIndex(random, dir4, true);
|
||||
|
||||
performTestsWithExceptionInReopen(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir3);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir4);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir3, false));
|
||||
pr.add(IndexReader.open(dir4, false));
|
||||
// Does not implement reopen, so
|
||||
// hits exception:
|
||||
pr.add(new FilterIndexReader(IndexReader.open(dir3, false)));
|
||||
return pr;
|
||||
}
|
||||
|
||||
});
|
||||
dir3.close();
|
||||
dir4.close();
|
||||
}
|
||||
|
||||
// LUCENE-1228: IndexWriter.commit() does not update the index version
|
||||
// populate an index in iterations.
|
||||
@@ -209,744 +116,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
|
||||
}
|
||||
|
||||
public void testMultiReaderReopen() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, true);
|
||||
|
||||
final Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
performDefaultTests(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir1);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir2);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return new MultiReader(IndexReader.open(dir1, false),
|
||||
IndexReader.open(dir2, false));
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
|
||||
final Directory dir3 = newDirectory();
|
||||
createIndex(random, dir3, true);
|
||||
|
||||
final Directory dir4 = newDirectory();
|
||||
createIndex(random, dir4, true);
|
||||
|
||||
performTestsWithExceptionInReopen(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
TestIndexReaderReopen.modifyIndex(i, dir3);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir4);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return new MultiReader(IndexReader.open(dir3, false),
|
||||
IndexReader.open(dir4, false),
|
||||
// Does not implement reopen, so
|
||||
// hits exception:
|
||||
new FilterIndexReader(IndexReader.open(dir3, false)));
|
||||
}
|
||||
|
||||
});
|
||||
dir3.close();
|
||||
dir4.close();
|
||||
}
|
||||
|
||||
public void testMixedReaders() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, true);
|
||||
final Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
final Directory dir3 = newDirectory();
|
||||
createIndex(random, dir3, false);
|
||||
final Directory dir4 = newDirectory();
|
||||
createIndex(random, dir4, true);
|
||||
final Directory dir5 = newDirectory();
|
||||
createIndex(random, dir5, false);
|
||||
|
||||
performDefaultTests(new TestReopen() {
|
||||
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
// only change norms in this index to maintain the same number of docs for each of ParallelReader's subreaders
|
||||
if (i == 1) TestIndexReaderReopen.modifyIndex(i, dir1);
|
||||
|
||||
TestIndexReaderReopen.modifyIndex(i, dir4);
|
||||
TestIndexReaderReopen.modifyIndex(i, dir5);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
ParallelReader pr = new ParallelReader();
|
||||
pr.add(IndexReader.open(dir1, false));
|
||||
pr.add(IndexReader.open(dir2, false));
|
||||
MultiReader mr = new MultiReader(IndexReader.open(dir3, false), IndexReader.open(dir4, false));
|
||||
return new MultiReader(pr, mr, IndexReader.open(dir5, false));
|
||||
}
|
||||
});
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
dir3.close();
|
||||
dir4.close();
|
||||
dir5.close();
|
||||
}
|
||||
|
||||
private void performDefaultTests(TestReopen test) throws Exception {
|
||||
|
||||
IndexReader index1 = test.openReader();
|
||||
IndexReader index2 = test.openReader();
|
||||
|
||||
TestIndexReader.assertIndexEquals(index1, index2);
|
||||
|
||||
// verify that reopen() does not return a new reader instance
|
||||
// in case the index has no changes
|
||||
ReaderCouple couple = refreshReader(index2, false);
|
||||
assertTrue(couple.refreshedReader == index2);
|
||||
|
||||
couple = refreshReader(index2, test, 0, true);
|
||||
index1.close();
|
||||
index1 = couple.newReader;
|
||||
|
||||
IndexReader index2_refreshed = couple.refreshedReader;
|
||||
index2.close();
|
||||
|
||||
// test if refreshed reader and newly opened reader return equal results
|
||||
TestIndexReader.assertIndexEquals(index1, index2_refreshed);
|
||||
|
||||
index2_refreshed.close();
|
||||
assertReaderClosed(index2, true, true);
|
||||
assertReaderClosed(index2_refreshed, true, true);
|
||||
|
||||
index2 = test.openReader();
|
||||
|
||||
for (int i = 1; i < 4; i++) {
|
||||
|
||||
index1.close();
|
||||
couple = refreshReader(index2, test, i, true);
|
||||
// refresh IndexReader
|
||||
index2.close();
|
||||
|
||||
index2 = couple.refreshedReader;
|
||||
index1 = couple.newReader;
|
||||
TestIndexReader.assertIndexEquals(index1, index2);
|
||||
}
|
||||
|
||||
index1.close();
|
||||
index2.close();
|
||||
assertReaderClosed(index1, true, true);
|
||||
assertReaderClosed(index2, true, true);
|
||||
}
|
||||
|
||||
public void testReferenceCounting() throws IOException {
|
||||
for (int mode = 0; mode < 4; mode++) {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, true);
|
||||
|
||||
IndexReader reader0 = IndexReader.open(dir1, false);
|
||||
assertRefCountEquals(1, reader0);
|
||||
|
||||
assertTrue(reader0 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders0 = reader0.getSequentialSubReaders();
|
||||
for (int i = 0; i < subReaders0.length; i++) {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
}
|
||||
|
||||
// delete the first document, so that only one of the subReaders has to be re-opened
|
||||
IndexReader modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(0);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader1 = refreshReader(reader0, true).refreshedReader;
|
||||
assertTrue(reader1 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders1 = reader1.getSequentialSubReaders();
|
||||
assertEquals(subReaders0.length, subReaders1.length);
|
||||
|
||||
for (int i = 0; i < subReaders0.length; i++) {
|
||||
if (subReaders0[i] != subReaders1[i]) {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
assertRefCountEquals(1, subReaders1[i]);
|
||||
} else {
|
||||
assertRefCountEquals(2, subReaders0[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// delete another document, so that only one of the subReaders has to be re-opened
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(1);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader2 = refreshReader(reader1, true).refreshedReader;
|
||||
assertTrue(reader2 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders2 = reader2.getSequentialSubReaders();
|
||||
assertEquals(subReaders1.length, subReaders2.length);
|
||||
|
||||
for (int i = 0; i < subReaders2.length; i++) {
|
||||
if (subReaders2[i] == subReaders1[i]) {
|
||||
if (subReaders1[i] == subReaders0[i]) {
|
||||
assertRefCountEquals(3, subReaders2[i]);
|
||||
} else {
|
||||
assertRefCountEquals(2, subReaders2[i]);
|
||||
}
|
||||
} else {
|
||||
assertRefCountEquals(1, subReaders2[i]);
|
||||
if (subReaders0[i] == subReaders1[i]) {
|
||||
assertRefCountEquals(2, subReaders2[i]);
|
||||
assertRefCountEquals(2, subReaders0[i]);
|
||||
} else {
|
||||
assertRefCountEquals(1, subReaders0[i]);
|
||||
assertRefCountEquals(1, subReaders1[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
IndexReader reader3 = refreshReader(reader0, true).refreshedReader;
|
||||
assertTrue(reader3 instanceof DirectoryReader);
|
||||
IndexReader[] subReaders3 = reader3.getSequentialSubReaders();
|
||||
assertEquals(subReaders3.length, subReaders0.length);
|
||||
|
||||
// try some permutations
|
||||
switch (mode) {
|
||||
case 0:
|
||||
reader0.close();
|
||||
reader1.close();
|
||||
reader2.close();
|
||||
reader3.close();
|
||||
break;
|
||||
case 1:
|
||||
reader3.close();
|
||||
reader2.close();
|
||||
reader1.close();
|
||||
reader0.close();
|
||||
break;
|
||||
case 2:
|
||||
reader2.close();
|
||||
reader3.close();
|
||||
reader0.close();
|
||||
reader1.close();
|
||||
break;
|
||||
case 3:
|
||||
reader1.close();
|
||||
reader3.close();
|
||||
reader2.close();
|
||||
reader0.close();
|
||||
break;
|
||||
}
|
||||
|
||||
assertReaderClosed(reader0, true, true);
|
||||
assertReaderClosed(reader1, true, true);
|
||||
assertReaderClosed(reader2, true, true);
|
||||
assertReaderClosed(reader3, true, true);
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public void testReferenceCountingMultiReader() throws IOException {
|
||||
for (int mode = 0; mode <=1; mode++) {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, false);
|
||||
Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
IndexReader initReader2 = IndexReader.open(dir2, false);
|
||||
IndexReader multiReader1 = new MultiReader(new IndexReader[] {reader1, initReader2}, (mode == 0));
|
||||
modifyIndex(0, dir2);
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
IndexReader multiReader2 = IndexReader.openIfChanged(multiReader1);
|
||||
assertNotNull(multiReader2);
|
||||
// index1 hasn't changed, so multiReader2 should share reader1 now with multiReader1
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
modifyIndex(0, dir1);
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
assertNull(IndexReader.openIfChanged(reader2));
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
modifyIndex(1, dir1);
|
||||
IndexReader reader3 = IndexReader.openIfChanged(reader2);
|
||||
assertNotNull(reader3);
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
assertRefCountEquals(1, reader2);
|
||||
|
||||
multiReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
multiReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
reader1.close();
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
multiReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
multiReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, true);
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testReferenceCountingParallelReader() throws IOException {
|
||||
for (int mode = 0; mode <=1; mode++) {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, false);
|
||||
Directory dir2 = newDirectory();
|
||||
createIndex(random, dir2, true);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
ParallelReader parallelReader1 = new ParallelReader(mode == 0);
|
||||
parallelReader1.add(reader1);
|
||||
IndexReader initReader2 = IndexReader.open(dir2, false);
|
||||
parallelReader1.add(initReader2);
|
||||
modifyIndex(1, dir2);
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
IndexReader parallelReader2 = IndexReader.openIfChanged(parallelReader1);
|
||||
assertNotNull(parallelReader2);
|
||||
assertNull(IndexReader.openIfChanged(parallelReader2));
|
||||
// index1 hasn't changed, so parallelReader2 should share reader1 now with parallelReader1
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
modifyIndex(0, dir1);
|
||||
modifyIndex(0, dir2);
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
modifyIndex(4, dir1);
|
||||
IndexReader reader3 = IndexReader.openIfChanged(reader2);
|
||||
assertNotNull(reader3);
|
||||
assertRefCountEquals(2 + mode, reader1);
|
||||
assertRefCountEquals(1, reader2);
|
||||
|
||||
parallelReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
parallelReader1.close();
|
||||
assertRefCountEquals(1 + mode, reader1);
|
||||
|
||||
if (mode == 1) {
|
||||
initReader2.close();
|
||||
}
|
||||
|
||||
reader1.close();
|
||||
assertRefCountEquals(1, reader1);
|
||||
|
||||
parallelReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
parallelReader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, false);
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertReaderClosed(reader1, true, true);
|
||||
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void testNormsRefCounting() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
createIndex(random, dir1, false);
|
||||
|
||||
IndexReader reader1 = IndexReader.open(dir1, false);
|
||||
SegmentReader segmentReader1 = getOnlySegmentReader(reader1);
|
||||
IndexReader modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(0);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader2 = IndexReader.openIfChanged(reader1);
|
||||
assertNotNull(reader2);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
modifier.setNorm(1, "field1", sim.encodeNormValue(50f));
|
||||
modifier.setNorm(1, "field2", sim.encodeNormValue(50f));
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader3 = IndexReader.openIfChanged(reader2);
|
||||
assertNotNull(reader3);
|
||||
SegmentReader segmentReader3 = getOnlySegmentReader(reader3);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(2);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader4 = IndexReader.openIfChanged(reader3);
|
||||
assertNotNull(reader4);
|
||||
modifier = IndexReader.open(dir1, false);
|
||||
modifier.deleteDocument(3);
|
||||
modifier.close();
|
||||
|
||||
IndexReader reader5 = IndexReader.openIfChanged(reader3);
|
||||
assertNotNull(reader5);
|
||||
|
||||
// Now reader2-reader5 reference reader1. reader1 and reader2
|
||||
// share the same norms. reader3, reader4, reader5 also share norms.
|
||||
assertRefCountEquals(1, reader1);
|
||||
assertFalse(segmentReader1.normsClosed());
|
||||
|
||||
reader1.close();
|
||||
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader1.normsClosed());
|
||||
|
||||
reader2.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
// now the norms for field1 and field2 should be closed
|
||||
assertTrue(segmentReader1.normsClosed("field1"));
|
||||
assertTrue(segmentReader1.normsClosed("field2"));
|
||||
|
||||
// but the norms for field3 and field4 should still be open
|
||||
assertFalse(segmentReader1.normsClosed("field3"));
|
||||
assertFalse(segmentReader1.normsClosed("field4"));
|
||||
|
||||
reader3.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader3.normsClosed());
|
||||
reader5.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
assertFalse(segmentReader3.normsClosed());
|
||||
reader4.close();
|
||||
assertRefCountEquals(0, reader1);
|
||||
|
||||
// and now all norms that reader1 used should be closed
|
||||
assertTrue(segmentReader1.normsClosed());
|
||||
|
||||
// now that reader3, reader4 and reader5 are closed,
|
||||
// the norms that those three readers shared should be
|
||||
// closed as well
|
||||
assertTrue(segmentReader3.normsClosed());
|
||||
|
||||
dir1.close();
|
||||
}
|
||||
|
||||
private void performTestsWithExceptionInReopen(TestReopen test) throws Exception {
|
||||
IndexReader index1 = test.openReader();
|
||||
IndexReader index2 = test.openReader();
|
||||
|
||||
TestIndexReader.assertIndexEquals(index1, index2);
|
||||
|
||||
try {
|
||||
refreshReader(index1, test, 0, true);
|
||||
fail("Expected exception not thrown.");
|
||||
} catch (Exception e) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
// index2 should still be usable and unaffected by the failed reopen() call
|
||||
TestIndexReader.assertIndexEquals(index1, index2);
|
||||
|
||||
index1.close();
|
||||
index2.close();
|
||||
}
|
||||
|
||||
public void testThreadSafety() throws Exception {
|
||||
final Directory dir = newDirectory();
|
||||
// NOTE: this also controls the number of threads!
|
||||
final int n = _TestUtil.nextInt(random, 20, 40);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
for (int i = 0; i < n; i++) {
|
||||
writer.addDocument(createDocument(i, 3));
|
||||
}
|
||||
writer.forceMerge(1);
|
||||
writer.close();
|
||||
|
||||
final TestReopen test = new TestReopen() {
|
||||
@Override
|
||||
protected void modifyIndex(int i) throws IOException {
|
||||
if (i % 3 == 0) {
|
||||
IndexReader modifier = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
modifier.setNorm(i, "field1", sim.encodeNormValue(50f));
|
||||
modifier.close();
|
||||
} else if (i % 3 == 1) {
|
||||
IndexReader modifier = IndexReader.open(dir, false);
|
||||
modifier.deleteDocument(i % modifier.maxDoc());
|
||||
modifier.close();
|
||||
} else {
|
||||
IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
modifier.addDocument(createDocument(n + i, 6));
|
||||
modifier.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected IndexReader openReader() throws IOException {
|
||||
return IndexReader.open(dir, false);
|
||||
}
|
||||
};
|
||||
|
||||
final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
|
||||
IndexReader firstReader = IndexReader.open(dir, false);
|
||||
IndexReader reader = firstReader;
|
||||
final Random rnd = random;
|
||||
|
||||
ReaderThread[] threads = new ReaderThread[n];
|
||||
final Set<IndexReader> readersToClose = Collections.synchronizedSet(new HashSet<IndexReader>());
|
||||
|
||||
for (int i = 0; i < n; i++) {
|
||||
if (i % 2 == 0) {
|
||||
IndexReader refreshed = IndexReader.openIfChanged(reader);
|
||||
if (refreshed != null) {
|
||||
readersToClose.add(reader);
|
||||
reader = refreshed;
|
||||
}
|
||||
}
|
||||
final IndexReader r = reader;
|
||||
|
||||
final int index = i;
|
||||
|
||||
ReaderThreadTask task;
|
||||
|
||||
if (i < 4 || (i >=10 && i < 14) || i > 18) {
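// threads with these indexes refresh/reopen the reader and search it;
// the remaining threads only compare the ReaderCouples produced by the refresh calls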
|
||||
task = new ReaderThreadTask() {
|
||||
|
||||
@Override
|
||||
public void run() throws Exception {
|
||||
while (!stopped) {
|
||||
if (index % 2 == 0) {
|
||||
// refresh reader synchronized
|
||||
ReaderCouple c = (refreshReader(r, test, index, true));
|
||||
readersToClose.add(c.newReader);
|
||||
readersToClose.add(c.refreshedReader);
|
||||
readers.add(c);
|
||||
// prevent too many readers
|
||||
break;
|
||||
} else {
|
||||
// not synchronized
|
||||
IndexReader refreshed = IndexReader.openIfChanged(r);
|
||||
if (refreshed == null) {
|
||||
refreshed = r;
|
||||
}
|
||||
|
||||
IndexSearcher searcher = newSearcher(refreshed);
|
||||
ScoreDoc[] hits = searcher.search(
|
||||
new TermQuery(new Term("field1", "a" + rnd.nextInt(refreshed.maxDoc()))),
|
||||
null, 1000).scoreDocs;
|
||||
if (hits.length > 0) {
|
||||
searcher.doc(hits[0].doc);
|
||||
}
|
||||
searcher.close();
|
||||
if (refreshed != r) {
|
||||
refreshed.close();
|
||||
}
|
||||
}
|
||||
synchronized(this) {
|
||||
wait(_TestUtil.nextInt(random, 1, 100));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
} else {
|
||||
task = new ReaderThreadTask() {
|
||||
@Override
|
||||
public void run() throws Exception {
|
||||
while (!stopped) {
|
||||
int numReaders = readers.size();
|
||||
if (numReaders > 0) {
|
||||
ReaderCouple c = readers.get(rnd.nextInt(numReaders));
|
||||
TestIndexReader.assertIndexEquals(c.newReader, c.refreshedReader);
|
||||
}
|
||||
|
||||
synchronized(this) {
|
||||
wait(_TestUtil.nextInt(random, 1, 100));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
threads[i] = new ReaderThread(task);
|
||||
threads[i].start();
|
||||
}
|
||||
|
||||
synchronized(this) {
|
||||
wait(1000);
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; i++) {
|
||||
if (threads[i] != null) {
|
||||
threads[i].stopThread();
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < n; i++) {
|
||||
if (threads[i] != null) {
|
||||
threads[i].join();
|
||||
if (threads[i].error != null) {
|
||||
String msg = "Error occurred in thread " + threads[i].getName() + ":\n" + threads[i].error.getMessage();
|
||||
fail(msg);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
for (final IndexReader readerToClose : readersToClose) {
|
||||
readerToClose.close();
|
||||
}
|
||||
|
||||
firstReader.close();
|
||||
reader.close();
|
||||
|
||||
for (final IndexReader readerToClose : readersToClose) {
|
||||
assertReaderClosed(readerToClose, true, true);
|
||||
}
|
||||
|
||||
assertReaderClosed(reader, true, true);
|
||||
assertReaderClosed(firstReader, true, true);
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private static class ReaderCouple {
|
||||
ReaderCouple(IndexReader r1, IndexReader r2) {
|
||||
newReader = r1;
|
||||
refreshedReader = r2;
|
||||
}
|
||||
|
||||
IndexReader newReader;
|
||||
IndexReader refreshedReader;
|
||||
}
|
||||
|
||||
private abstract static class ReaderThreadTask {
|
||||
protected volatile boolean stopped;
|
||||
public void stop() {
|
||||
this.stopped = true;
|
||||
}
|
||||
|
||||
public abstract void run() throws Exception;
|
||||
}
|
||||
|
||||
private static class ReaderThread extends Thread {
|
||||
private ReaderThreadTask task;
|
||||
private Throwable error;
|
||||
|
||||
|
||||
ReaderThread(ReaderThreadTask task) {
|
||||
this.task = task;
|
||||
}
|
||||
|
||||
public void stopThread() {
|
||||
this.task.stop();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
this.task.run();
|
||||
} catch (Throwable r) {
|
||||
r.printStackTrace(System.out);
|
||||
this.error = r;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private Object createReaderMutex = new Object();
|
||||
|
||||
private ReaderCouple refreshReader(IndexReader reader, boolean hasChanges) throws IOException {
|
||||
return refreshReader(reader, null, -1, hasChanges);
|
||||
}
|
||||
|
||||
ReaderCouple refreshReader(IndexReader reader, TestReopen test, int modify, boolean hasChanges) throws IOException {
|
||||
synchronized (createReaderMutex) {
|
||||
IndexReader r = null;
|
||||
if (test != null) {
|
||||
test.modifyIndex(modify);
|
||||
r = test.openReader();
|
||||
}
|
||||
|
||||
IndexReader refreshed = null;
|
||||
try {
|
||||
refreshed = IndexReader.openIfChanged(reader);
|
||||
if (refreshed == null) {
|
||||
refreshed = reader;
|
||||
}
|
||||
} finally {
|
||||
if (refreshed == null && r != null) {
|
||||
// Hit exception -- close opened reader
|
||||
r.close();
|
||||
}
|
||||
}
|
||||
|
||||
if (hasChanges) {
|
||||
if (refreshed == reader) {
|
||||
fail("No new IndexReader instance created during refresh.");
|
||||
}
|
||||
} else {
|
||||
if (refreshed != reader) {
|
||||
fail("New IndexReader instance created during refresh even though index had no changes.");
|
||||
}
|
||||
}
|
||||
|
||||
return new ReaderCouple(r, refreshed);
|
||||
}
|
||||
}
|
||||
|
||||
public static void createIndex(Random random, Directory dir, boolean multiSegment) throws IOException {
|
||||
IndexWriter.unlock(dir);
|
||||
IndexWriter w = new IndexWriter(dir, LuceneTestCase.newIndexWriterConfig(random,
|
||||
@@ -996,59 +165,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
return doc;
|
||||
}
|
||||
|
||||
static void modifyIndex(int i, Directory dir) throws IOException {
|
||||
switch (i) {
|
||||
case 0: {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: modify index");
|
||||
}
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.deleteDocuments(new Term("field2", "a11"));
|
||||
w.deleteDocuments(new Term("field2", "b30"));
|
||||
w.close();
|
||||
break;
|
||||
}
|
||||
case 1: {
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(4, "field1", sim.encodeNormValue(123f));
|
||||
reader.setNorm(44, "field2", sim.encodeNormValue(222f));
|
||||
reader.setNorm(44, "field4", sim.encodeNormValue(22f));
|
||||
reader.close();
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.forceMerge(1);
|
||||
w.close();
|
||||
break;
|
||||
}
|
||||
case 3: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.addDocument(createDocument(101, 4));
|
||||
w.forceMerge(1);
|
||||
w.addDocument(createDocument(102, 4));
|
||||
w.addDocument(createDocument(103, 4));
|
||||
w.close();
|
||||
break;
|
||||
}
|
||||
case 4: {
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
DefaultSimilarity sim = new DefaultSimilarity();
|
||||
reader.setNorm(5, "field1", sim.encodeNormValue(123f));
|
||||
reader.setNorm(55, "field2", sim.encodeNormValue(222f));
|
||||
reader.close();
|
||||
break;
|
||||
}
|
||||
case 5: {
|
||||
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
|
||||
w.addDocument(createDocument(101, 4));
|
||||
w.close();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void assertReaderClosed(IndexReader reader, boolean checkSubReaders, boolean checkNormsClosed) {
|
||||
assertEquals(0, reader.getRefCount());
|
||||
@@ -1100,7 +216,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
private abstract static class TestReopen {
|
||||
protected abstract IndexReader openReader() throws IOException;
|
||||
protected abstract void modifyIndex(int i) throws IOException;
|
||||
}
|
||||
|
||||
public void testCloseOrig() throws Throwable {
|
||||
@@ -1125,72 +240,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
dir.close();
|
||||
}
|
||||
|
||||
public void testDeletes() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
createIndex(random, dir, false); // Create an index with a bunch of docs (1 segment)
|
||||
|
||||
modifyIndex(0, dir); // Get delete bitVector on 1st segment
|
||||
modifyIndex(5, dir); // Add a doc (2 segments)
|
||||
|
||||
IndexReader r1 = IndexReader.open(dir, false); // MSR
|
||||
|
||||
modifyIndex(5, dir); // Add another doc (3 segments)
|
||||
|
||||
IndexReader r2 = IndexReader.openIfChanged(r1); // MSR
|
||||
assertNotNull(r2);
|
||||
assertNull(IndexReader.openIfChanged(r2));
|
||||
assertTrue(r1 != r2);
|
||||
|
||||
SegmentReader sr1 = (SegmentReader) r1.getSequentialSubReaders()[0]; // Get SRs for the first segment from original
|
||||
SegmentReader sr2 = (SegmentReader) r2.getSequentialSubReaders()[0]; // and reopened IRs
|
||||
|
||||
// At this point they share the same BitVector
|
||||
assertTrue(sr1.liveDocs==sr2.liveDocs);
|
||||
|
||||
r2.deleteDocument(0);
|
||||
|
||||
// r1 should not see the delete
|
||||
final Bits r1LiveDocs = MultiFields.getLiveDocs(r1);
|
||||
assertFalse(r1LiveDocs != null && !r1LiveDocs.get(0));
|
||||
|
||||
// Now r2 should have made a private copy of deleted docs:
|
||||
assertTrue(sr1.liveDocs!=sr2.liveDocs);
|
||||
|
||||
r1.close();
|
||||
r2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testDeletes2() throws Throwable {
|
||||
Directory dir = newDirectory();
|
||||
createIndex(random, dir, false);
|
||||
// Get delete bitVector
|
||||
modifyIndex(0, dir);
|
||||
IndexReader r1 = IndexReader.open(dir, false);
|
||||
|
||||
// Add doc:
|
||||
modifyIndex(5, dir);
|
||||
|
||||
IndexReader r2 = IndexReader.openIfChanged(r1);
|
||||
assertNotNull(r2);
|
||||
assertTrue(r1 != r2);
|
||||
|
||||
IndexReader[] rs2 = r2.getSequentialSubReaders();
|
||||
|
||||
SegmentReader sr1 = getOnlySegmentReader(r1);
|
||||
SegmentReader sr2 = (SegmentReader) rs2[0];
|
||||
|
||||
// At this point they share the same BitVector
|
||||
assertTrue(sr1.liveDocs==sr2.liveDocs);
|
||||
final BitVector liveDocs = sr1.liveDocs;
|
||||
r1.close();
|
||||
|
||||
r2.deleteDocument(0);
|
||||
assertTrue(liveDocs==sr2.liveDocs);
|
||||
r2.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private static class KeepAllCommits implements IndexDeletionPolicy {
|
||||
public void onInit(List<? extends IndexCommit> commits) {
|
||||
}
|
||||
|
|
|
@@ -39,213 +39,6 @@ import org.apache.lucene.util.LuceneTestCase;
 * separate norms, addDocument, addIndexes, forceMerge.
 */
public class TestNorms extends LuceneTestCase {

  private class SimilarityProviderOne extends DefaultSimilarityProvider {
    @Override
    public Similarity get(String field) {
      return new DefaultSimilarity() {
        @Override
        public byte computeNorm(FieldInvertState state) {
          // disable length norm
          return encodeNormValue(state.getBoost());
        }
      };
    }
  }

  private static final int NUM_FIELDS = 10;

  private SimilarityProvider similarityProviderOne;
  private Analyzer anlzr;
  private int numDocNorms;
  private ArrayList<Float> norms;
  private ArrayList<Float> modifiedNorms;
  private float lastNorm = 0;
  private float normDelta = (float) 0.001;

  @Override
  public void setUp() throws Exception {
    super.setUp();
    similarityProviderOne = new SimilarityProviderOne();
    anlzr = new MockAnalyzer(random);
  }

  /**
   * Test that norm values are preserved as the index is maintained.
   * Including separate norms.
   * Including merging indexes with separate norms.
   * Including forceMerge.
   */
  public void testNorms() throws IOException {
    Directory dir1 = newDirectory();

    norms = new ArrayList<Float>();
    modifiedNorms = new ArrayList<Float>();

    createIndex(random, dir1);
    doTestNorms(random, dir1);

    // test with a single index: index2
    ArrayList<Float> norms1 = norms;
    ArrayList<Float> modifiedNorms1 = modifiedNorms;
    int numDocNorms1 = numDocNorms;

    norms = new ArrayList<Float>();
    modifiedNorms = new ArrayList<Float>();
    numDocNorms = 0;

    Directory dir2 = newDirectory();

    createIndex(random, dir2);
    doTestNorms(random, dir2);

    // add index1 and index2 to a third index: index3
    Directory dir3 = newDirectory();

    createIndex(random, dir3);
    IndexWriter iw = new IndexWriter(
        dir3,
        newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(5).
            setMergePolicy(newLogMergePolicy(3))
    );
    iw.addIndexes(dir1, dir2);
    iw.forceMerge(1);
    iw.close();

    norms1.addAll(norms);
    norms = norms1;
    modifiedNorms1.addAll(modifiedNorms);
    modifiedNorms = modifiedNorms1;
    numDocNorms += numDocNorms1;

    // test with index3
    verifyIndex(dir3);
    doTestNorms(random, dir3);

    // now with single segment
    iw = new IndexWriter(
        dir3,
        newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(5).
            setMergePolicy(newLogMergePolicy(3))
    );
    iw.forceMerge(1);
    iw.close();
    verifyIndex(dir3);

    dir1.close();
    dir2.close();
    dir3.close();
  }

  private void doTestNorms(Random random, Directory dir) throws IOException {
    int num = atLeast(1);
    for (int i = 0; i < num; i++) {
      addDocs(random, dir, 12, true);
      verifyIndex(dir);
      modifyNormsForF1(dir);
      verifyIndex(dir);
      addDocs(random, dir, 12, false);
      verifyIndex(dir);
      modifyNormsForF1(dir);
      verifyIndex(dir);
    }
  }

  private void createIndex(Random random, Directory dir) throws IOException {
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
        .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy()));
    LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
    lmp.setMergeFactor(3);
    lmp.setUseCompoundFile(true);
    iw.close();
  }

  private void modifyNormsForF1(Directory dir) throws IOException {
    IndexReader ir = IndexReader.open(dir, false);
    int n = ir.maxDoc();
    for (int i = 0; i < n; i += 3) { // modify for every third doc
      int k = (i * 3) % modifiedNorms.size();
      float origNorm = modifiedNorms.get(i).floatValue();
      float newNorm = modifiedNorms.get(k).floatValue();
      //System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
      //System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
      modifiedNorms.set(i, Float.valueOf(newNorm));
      modifiedNorms.set(k, Float.valueOf(origNorm));
      DefaultSimilarity sim = new DefaultSimilarity();
      ir.setNorm(i, "f"+1, sim.encodeNormValue(newNorm));
      ir.setNorm(k, "f"+1, sim.encodeNormValue(origNorm));
    }
    ir.close();
  }

  private void verifyIndex(Directory dir) throws IOException {
    IndexReader ir = IndexReader.open(dir, false);
    for (int i = 0; i < NUM_FIELDS; i++) {
      String field = "f"+i;
      byte b[] = MultiNorms.norms(ir, field);
      assertEquals("number of norms mismatches", numDocNorms, b.length);
      ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
      DefaultSimilarity sim = (DefaultSimilarity) similarityProviderOne.get(field);
      for (int j = 0; j < b.length; j++) {
        float norm = sim.decodeNormValue(b[j]);
        float norm1 = storedNorms.get(j).floatValue();
        assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
      }
    }
    ir.close();
  }

  private void addDocs(Random random, Directory dir, int ndocs, boolean compound) throws IOException {
    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
        .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy()));
    LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
    lmp.setMergeFactor(3);
    lmp.setUseCompoundFile(compound);
    for (int i = 0; i < ndocs; i++) {
      iw.addDocument(newDoc());
    }
    iw.close();
  }

  // create the next document
  private Document newDoc() {
    Document d = new Document();
    float boost = nextNorm("anyfield"); // in this test the same similarity is used for all fields, so it does not matter which field is passed
    for (int i = 0; i < 10; i++) {
      Field f = newField("f"+i, "v"+i, TextField.TYPE_UNSTORED);
      f.setBoost(boost);
      d.add(f);
    }
    return d;
  }

  // return unique norm values that are unchanged by encoding/decoding
  private float nextNorm(String fname) {
    float norm = lastNorm + normDelta;
    DefaultSimilarity similarity = (DefaultSimilarity) similarityProviderOne.get(fname);
    do {
      float norm1 = similarity.decodeNormValue(similarity.encodeNormValue(norm));
      if (norm1 > lastNorm) {
        //System.out.println(norm1+" > "+lastNorm);
        norm = norm1;
        break;
      }
      norm += normDelta;
    } while (true);
    norms.add(numDocNorms, Float.valueOf(norm));
    modifiedNorms.add(numDocNorms, Float.valueOf(norm));
    //System.out.println("creating norm("+numDocNorms+"): "+norm);
    numDocNorms++;
    lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct values can be stored in a single byte
    return norm;
  }
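  // Aside (illustration only, not part of this file): nextNorm() exists because
  // norms are quantized to a single byte, so an arbitrary float usually does not
  // survive the store/load round trip. Using only the DefaultSimilarity methods
  // already exercised above, that round trip looks like:
  //
  //   DefaultSimilarity sim = new DefaultSimilarity();
  //   float requested = 0.123f;                       // the boost/norm asked for
  //   byte encoded = sim.encodeNormValue(requested);  // the single byte that would be stored
  //   float stored = sim.decodeNormValue(encoded);    // usually != requested
  //
  // nextNorm() keeps stepping by normDelta until it reaches a value that decodes
  // back to itself, so verifyIndex() can compare the stored norms against the
  // recorded values within a tiny tolerance.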

  class CustomNormEncodingSimilarity extends DefaultSimilarity {
    @Override
@@ -115,34 +115,6 @@ public class TestParallelReader extends LuceneTestCase {
    dir1.close();
    dir2.close();
  }

  public void testIsCurrent() throws IOException {
    Directory dir1 = getDir1(random);
    Directory dir2 = getDir2(random);
    ParallelReader pr = new ParallelReader();
    pr.add(IndexReader.open(dir1, false));
    pr.add(IndexReader.open(dir2, false));

    assertTrue(pr.isCurrent());
    IndexReader modifier = IndexReader.open(dir1, false);
    DefaultSimilarity sim = new DefaultSimilarity();
    modifier.setNorm(0, "f1", sim.encodeNormValue(100f));
    modifier.close();

    // one of the two IndexReaders which ParallelReader is using
    // is not current anymore
    assertFalse(pr.isCurrent());

    modifier = IndexReader.open(dir2, false);
    modifier.setNorm(0, "f3", sim.encodeNormValue(100f));
    modifier.close();

    // now both are not current anymore
    assertFalse(pr.isCurrent());
    pr.close();
    dir1.close();
    dir2.close();
  }

  private void queryTest(Query query) throws IOException {
    ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
@@ -155,27 +155,6 @@ public class TestSegmentMerger extends LuceneTestCase {
    }
    assertFalse("should not have been able to create a .cfs with .del and .s* files", doFail);

    // Create an index w/ .s*
    w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
    doc = new Document();
    doc.add(new TextField("c", "test"));
    w.addDocument(doc);
    w.close();
    IndexReader r = IndexReader.open(dir, false);
    r.setNorm(0, "c", (byte) 1);
    r.close();

    // Assert that SM fails if .s* exists
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);
    try {
      IndexWriter.createCompoundFile(dir, "b2", MergeState.CheckAbort.NONE, sis.info(0), newIOContext(random));
      doFail = true; // should never get here
    } catch (AssertionError e) {
      // expected
    }
    assertFalse("should not have been able to create a .cfs with .del and .s* files", doFail);

    dir.close();
  }
@@ -1,98 +0,0 @@
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;

/** Document boost unit test. */
public class TestSetNorm extends LuceneTestCase {

  public void testSetNorm() throws Exception {
    Directory store = newDirectory();
    IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));

    // add the same document four times
    Field f1 = newField("field", "word", TextField.TYPE_STORED);
    Document d1 = new Document();
    d1.add(f1);
    writer.addDocument(d1);
    writer.addDocument(d1);
    writer.addDocument(d1);
    writer.addDocument(d1);
    writer.close();

    // reset the boost of each instance of this document
    IndexReader reader = IndexReader.open(store, false);
    DefaultSimilarity similarity = new DefaultSimilarity();
    reader.setNorm(0, "field", similarity.encodeNormValue(1.0f));
    reader.setNorm(1, "field", similarity.encodeNormValue(2.0f));
    reader.setNorm(2, "field", similarity.encodeNormValue(4.0f));
    reader.setNorm(3, "field", similarity.encodeNormValue(16.0f));
    reader.close();

    // check that searches are ordered by this boost
    final float[] scores = new float[4];

    IndexReader ir = IndexReader.open(store);
    IndexSearcher is = new IndexSearcher(ir);
    is.search(new TermQuery(new Term("field", "word")),
              new Collector() {
                private int base = 0;
                private Scorer scorer;
                @Override
                public void setScorer(Scorer scorer) throws IOException {
                  this.scorer = scorer;
                }
                @Override
                public final void collect(int doc) throws IOException {
                  scores[doc + base] = scorer.score();
                }
                @Override
                public void setNextReader(AtomicReaderContext context) {
                  base = context.docBase;
                }
                @Override
                public boolean acceptsDocsOutOfOrder() {
                  return true;
                }
              });
    is.close();
    ir.close();
    float lastScore = 0.0f;

    for (int i = 0; i < 4; i++) {
      assertTrue(scores[i] > lastScore);
      lastScore = scores[i];
    }
    store.close();
  }
}
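The anonymous Collector in the deleted test above can be read as the stand-alone class sketched below. This is only an illustration assembled from the same API calls the test uses (the name ScoreByDocCollector is invented for this sketch); it shows why collect() must add the current segment's docBase to the segment-local doc id before writing into the shared scores array.

import java.io.IOException;

import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

/** Collects each hit's score into an array indexed by global doc id. */
class ScoreByDocCollector extends Collector {
  private final float[] scores; // one slot per document in the index
  private int base = 0;         // docBase of the segment currently being searched
  private Scorer scorer;

  ScoreByDocCollector(float[] scores) {
    this.scores = scores;
  }

  @Override
  public void setScorer(Scorer scorer) throws IOException {
    this.scorer = scorer;
  }

  @Override
  public void collect(int doc) throws IOException {
    // doc is relative to the current segment; base maps it to a global doc id
    scores[doc + base] = scorer.score();
  }

  @Override
  public void setNextReader(AtomicReaderContext context) {
    base = context.docBase;
  }

  @Override
  public boolean acceptsDocsOutOfOrder() {
    return true;
  }
}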