fix deprecated tests #1 (trunk)

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@821340 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2009-10-03 15:45:34 +00:00
commit 834a116608
121 changed files with 517 additions and 3841 deletions

View File

@@ -1,85 +0,0 @@
package org.apache.lucene;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.Hit;
import org.apache.lucene.search.HitIterator;
import java.util.NoSuchElementException;
/**
* This test is intentionally not put in the search package in order
* to test HitIterator and Hit package protection.
*
* @deprecated Hits will be removed in Lucene 3.0
*/
public class TestHitIterator extends LuceneTestCase {
public void testIterator() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "iterator test doc 1", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "iterator test doc 2", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
_TestUtil.checkIndex(directory);
IndexSearcher searcher = new IndexSearcher(directory);
Hits hits = searcher.search(new TermQuery(new Term("field", "iterator")));
HitIterator iterator = (HitIterator) hits.iterator();
assertEquals(2, iterator.length());
assertTrue(iterator.hasNext());
Hit hit = (Hit) iterator.next();
assertEquals("iterator test doc 1", hit.get("field"));
assertTrue(iterator.hasNext());
hit = (Hit) iterator.next();
assertEquals("iterator test doc 2", hit.getDocument().get("field"));
assertFalse(iterator.hasNext());
boolean caughtException = false;
try {
iterator.next();
} catch (NoSuchElementException e) {
assertTrue(true);
caughtException = true;
}
assertTrue(caughtException);
}
}

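The file above exercised the deprecated Hits, Hit and HitIterator classes targeted by this commit. For orientation only (not part of this commit, and assuming the usual score tie-break by document id), the same assertions written against the TopDocs/ScoreDoc API that replaces Hits would look roughly like this:

IndexSearcher searcher = new IndexSearcher(directory, true);
TopDocs topDocs = searcher.search(new TermQuery(new Term("field", "iterator")), 10);
assertEquals(2, topDocs.totalHits);
// stored fields are fetched by doc id instead of via Hit objects
assertEquals("iterator test doc 1", searcher.doc(topDocs.scoreDocs[0].doc).get("field"));
assertEquals("iterator test doc 2", searcher.doc(topDocs.scoreDocs[1].doc).get("field"));
searcher.close();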
View File

@@ -98,7 +98,7 @@ public class TestSearch extends LuceneTestCase {
}
writer.close();
- Searcher searcher = new IndexSearcher(directory);
+ Searcher searcher = new IndexSearcher(directory, true);
String[] queries = {
"a b",

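The hunk above shows the pattern repeated throughout this commit: the deprecated one-argument IndexSearcher and IndexReader.open calls gain an explicit readOnly flag. A minimal sketch of the two variants, outside any particular test:

// read-only access, which is what plain searches want:
Searcher searcher = new IndexSearcher(directory, true);
IndexReader readOnlyReader = IndexReader.open(directory, true);
// writable access, still needed where a test calls deleteDocument() or setNorm():
IndexReader writableReader = IndexReader.open(directory, false);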
View File

@@ -100,7 +100,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
writer.close();
// try a search without OR
- Searcher searcher = new IndexSearcher(directory);
+ Searcher searcher = new IndexSearcher(directory, true);
QueryParser parser = new QueryParser(PRIORITY_FIELD, analyzer);
@@ -114,7 +114,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
searcher.close();
// try a new search with OR
- searcher = new IndexSearcher(directory);
+ searcher = new IndexSearcher(directory, true);
hits = null;
parser = new QueryParser(PRIORITY_FIELD, analyzer);

View File

@@ -32,56 +32,22 @@ import org.apache.lucene.util.LuceneTestCase;
*/
public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
private boolean onlyUseNewAPI = false;
private final Set testWithNewAPI;
public BaseTokenStreamTestCase() {
super();
this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
}
public BaseTokenStreamTestCase(String name) {
super(name);
this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI
}
/** @deprecated */
public BaseTokenStreamTestCase(Set testWithNewAPI) {
super();
this.testWithNewAPI = testWithNewAPI;
}
/** @deprecated */
public BaseTokenStreamTestCase(String name, Set testWithNewAPI) {
super(name);
this.testWithNewAPI = testWithNewAPI;
}
// @Override
protected void setUp() throws Exception {
super.setUp();
TokenStream.setOnlyUseNewAPI(onlyUseNewAPI);
}
// @Override
public void runBare() throws Throwable {
// Do the test with onlyUseNewAPI=false (default)
try {
onlyUseNewAPI = false;
super.runBare();
} catch (Throwable e) {
System.out.println("Test failure of '"+getName()+"' occurred with onlyUseNewAPI=false");
throw e;
}
if (testWithNewAPI == null || testWithNewAPI.contains(getName())) {
// Do the test again with onlyUseNewAPI=true
try {
onlyUseNewAPI = true;
super.runBare();
} catch (Throwable e) {
System.out.println("Test failure of '"+getName()+"' occurred with onlyUseNewAPI=true");
throw e;
}
}
}
// some helpers to test Analyzers and TokenStreams:

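The removed constructors and runBare() override ran every test twice, once with the old TokenStream API and once with onlyUseNewAPI=true; after this change the helpers hinted at above only need the attribute-based API. As an illustration only (the helper name is ours, not from this commit), a helper that drives a stream purely through the attribute API could look like:

static void assertTermsAre(TokenStream stream, String[] expected) throws IOException {
  TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
  stream.reset();
  for (int i = 0; i < expected.length; i++) {
    assertTrue("token " + i + " is missing", stream.incrementToken());
    assertEquals(expected[i], termAtt.term());
  }
  assertFalse("end of stream expected", stream.incrementToken());
  stream.end();
  stream.close();
}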
View File

@@ -70,7 +70,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, true);
TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
assertTrue(termPositions.next());
assertEquals(1, termPositions.freq());

View File

@@ -1,34 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import org.apache.lucene.util.LuceneTestCase;
/**
* Testcase for {@link CharacterCache}
*/
public class TestCharacterCache extends LuceneTestCase {
public void testValueOf() {
for (int i = 0; i < 256; i++) {
Character valueOf = CharacterCache.valueOf((char)i);
assertEquals((char)i, valueOf.charValue());
}
}
}

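CharacterCache merely mirrors java.lang.Character.valueOf(char), available since Java 5, which is presumably why its test is dropped here; the equivalent check against the JDK method is simply:

for (int i = 0; i < 256; i++) {
  assertEquals((char) i, Character.valueOf((char) i).charValue());
}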
View File

@@ -1,111 +0,0 @@
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import java.io.StringReader;
public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
public void testU() throws Exception {
TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
assertTermEquals("Des", filter, termAtt);
assertTermEquals("mot", filter, termAtt);
assertTermEquals("cles", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("LA", filter, termAtt);
assertTermEquals("CHAINE", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("AE", filter, termAtt);
assertTermEquals("C", filter, termAtt);
assertTermEquals("E", filter, termAtt);
assertTermEquals("E", filter, termAtt);
assertTermEquals("E", filter, termAtt);
assertTermEquals("E", filter, termAtt);
assertTermEquals("I", filter, termAtt);
assertTermEquals("I", filter, termAtt);
assertTermEquals("I", filter, termAtt);
assertTermEquals("I", filter, termAtt);
assertTermEquals("IJ", filter, termAtt);
assertTermEquals("D", filter, termAtt);
assertTermEquals("N", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("O", filter, termAtt);
assertTermEquals("OE", filter, termAtt);
assertTermEquals("TH", filter, termAtt);
assertTermEquals("U", filter, termAtt);
assertTermEquals("U", filter, termAtt);
assertTermEquals("U", filter, termAtt);
assertTermEquals("U", filter, termAtt);
assertTermEquals("Y", filter, termAtt);
assertTermEquals("Y", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("a", filter, termAtt);
assertTermEquals("ae", filter, termAtt);
assertTermEquals("c", filter, termAtt);
assertTermEquals("e", filter, termAtt);
assertTermEquals("e", filter, termAtt);
assertTermEquals("e", filter, termAtt);
assertTermEquals("e", filter, termAtt);
assertTermEquals("i", filter, termAtt);
assertTermEquals("i", filter, termAtt);
assertTermEquals("i", filter, termAtt);
assertTermEquals("i", filter, termAtt);
assertTermEquals("ij", filter, termAtt);
assertTermEquals("d", filter, termAtt);
assertTermEquals("n", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("o", filter, termAtt);
assertTermEquals("oe", filter, termAtt);
assertTermEquals("ss", filter, termAtt);
assertTermEquals("th", filter, termAtt);
assertTermEquals("u", filter, termAtt);
assertTermEquals("u", filter, termAtt);
assertTermEquals("u", filter, termAtt);
assertTermEquals("u", filter, termAtt);
assertTermEquals("y", filter, termAtt);
assertTermEquals("y", filter, termAtt);
assertTermEquals("fi", filter, termAtt);
assertTermEquals("fl", filter, termAtt);
assertFalse(filter.incrementToken());
}
void assertTermEquals(String expected, TokenStream stream, TermAttribute termAtt) throws Exception {
assertTrue(stream.incrementToken());
assertEquals(expected, termAtt.term());
}
}

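ISOLatin1AccentFilter is deprecated in favor of ASCIIFoldingFilter, which handles a superset of these character foldings. A short sketch (not part of this commit) of the same style of check against the replacement filter, reusing the removed assertTermEquals helper:

TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE"));
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
assertTermEquals("Des", filter, termAtt);
assertTermEquals("mot", filter, termAtt);
assertTermEquals("cles", filter, termAtt);
assertTermEquals("A", filter, termAtt);
assertTermEquals("LA", filter, termAtt);
assertTermEquals("CHAINE", filter, termAtt);
assertFalse(filter.incrementToken());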
View File

@@ -51,7 +51,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.close();
- searcher = new IndexSearcher(directory);
+ searcher = new IndexSearcher(directory, true);
}
public void testPerFieldAnalyzer() throws Exception {
@@ -78,7 +78,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, true);
TermDocs td = reader.termDocs(new Term("partnum", "Q36"));
assertTrue(td.next());
td = reader.termDocs(new Term("partnum", "Q37"));

View File

@@ -1,258 +0,0 @@
package org.apache.lucene.analysis;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
/**
* tests for the TeeTokenFilter and SinkTokenizer
*/
public class TestTeeTokenFilter extends LuceneTestCase {
protected StringBuilder buffer1;
protected StringBuilder buffer2;
protected String[] tokens1;
protected String[] tokens2;
public TestTeeTokenFilter(String s) {
super(s);
}
protected void setUp() throws Exception {
super.setUp();
tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"};
tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"};
buffer1 = new StringBuilder();
for (int i = 0; i < tokens1.length; i++) {
buffer1.append(tokens1[i]).append(' ');
}
buffer2 = new StringBuilder();
for (int i = 0; i < tokens2.length; i++) {
buffer2.append(tokens2[i]).append(' ');
}
}
public void test() throws IOException {
SinkTokenizer sink1 = new SinkTokenizer(null) {
public void add(Token t) {
if (t != null && t.term().equalsIgnoreCase("The")) {
super.add(t);
}
}
};
TokenStream source = new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), sink1);
int i = 0;
final Token reusableToken = new Token();
for (Token nextToken = source.next(reusableToken); nextToken != null; nextToken = source.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
assertTrue("sink1 Size: " + sink1.getTokens().size() + " is not: " + 2, sink1.getTokens().size() == 2);
i = 0;
for (Token token = sink1.next(reusableToken); token != null; token = sink1.next(reusableToken)) {
assertTrue(token.term() + " is not equal to " + "The", token.term().equalsIgnoreCase("The") == true);
i++;
}
assertTrue(i + " does not equal: " + sink1.getTokens().size(), i == sink1.getTokens().size());
}
public void testMultipleSources() throws Exception {
SinkTokenizer theDetector = new SinkTokenizer(null) {
public void add(Token t) {
if (t != null && t.term().equalsIgnoreCase("The")) {
super.add(t);
}
}
};
SinkTokenizer dogDetector = new SinkTokenizer(null) {
public void add(Token t) {
if (t != null && t.term().equalsIgnoreCase("Dogs")) {
super.add(t);
}
}
};
TokenStream source1 = new CachingTokenFilter(new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), theDetector), dogDetector));
TokenStream source2 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer2.toString())), theDetector), dogDetector);
int i = 0;
final Token reusableToken = new Token();
for (Token nextToken = source1.next(reusableToken); nextToken != null; nextToken = source1.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 2, theDetector.getTokens().size() == 2);
assertTrue("dogDetector Size: " + dogDetector.getTokens().size() + " is not: " + 1, dogDetector.getTokens().size() == 1);
i = 0;
for (Token nextToken = source2.next(reusableToken); nextToken != null; nextToken = source2.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + tokens2[i], nextToken.term().equals(tokens2[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens2.length, i == tokens2.length);
assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 4, theDetector.getTokens().size() == 4);
assertTrue("dogDetector Size: " + dogDetector.getTokens().size() + " is not: " + 2, dogDetector.getTokens().size() == 2);
i = 0;
for (Token nextToken = theDetector.next(reusableToken); nextToken != null; nextToken = theDetector.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + "The", nextToken.term().equalsIgnoreCase("The") == true);
i++;
}
assertTrue(i + " does not equal: " + theDetector.getTokens().size(), i == theDetector.getTokens().size());
i = 0;
for (Token nextToken = dogDetector.next(reusableToken); nextToken != null; nextToken = dogDetector.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + "Dogs", nextToken.term().equalsIgnoreCase("Dogs") == true);
i++;
}
assertTrue(i + " does not equal: " + dogDetector.getTokens().size(), i == dogDetector.getTokens().size());
source1.reset();
TokenStream lowerCasing = new LowerCaseFilter(source1);
i = 0;
for (Token nextToken = lowerCasing.next(reusableToken); nextToken != null; nextToken = lowerCasing.next(reusableToken)) {
assertTrue(nextToken.term() + " is not equal to " + tokens1[i].toLowerCase(), nextToken.term().equals(tokens1[i].toLowerCase()) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
}
/**
* Not an explicit test, just useful to print out some info on performance
*
* @throws Exception
*/
public void performance() throws Exception {
int[] tokCount = {100, 500, 1000, 2000, 5000, 10000};
int[] modCounts = {1, 2, 5, 10, 20, 50, 100, 200, 500};
for (int k = 0; k < tokCount.length; k++) {
StringBuilder buffer = new StringBuilder();
System.out.println("-----Tokens: " + tokCount[k] + "-----");
for (int i = 0; i < tokCount[k]; i++) {
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
}
//make sure we produce the same tokens
ModuloSinkTokenizer sink = new ModuloSinkTokenizer(tokCount[k], 100);
final Token reusableToken = new Token();
TokenStream stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink);
while (stream.next(reusableToken) != null) {
}
stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), 100);
List tmp = new ArrayList();
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
tmp.add(nextToken.clone());
}
List sinkList = sink.getTokens();
assertTrue("tmp Size: " + tmp.size() + " is not: " + sinkList.size(), tmp.size() == sinkList.size());
for (int i = 0; i < tmp.size(); i++) {
Token tfTok = (Token) tmp.get(i);
Token sinkTok = (Token) sinkList.get(i);
assertTrue(tfTok.term() + " is not equal to " + sinkTok.term() + " at token: " + i, tfTok.term().equals(sinkTok.term()) == true);
}
//simulate two fields, each being analyzed once, for 20 documents
for (int j = 0; j < modCounts.length; j++) {
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
stream = new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString())));
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
tfPos += nextToken.getPositionIncrement();
}
stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), modCounts[j]);
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
tfPos += nextToken.getPositionIncrement();
}
}
long finish = System.currentTimeMillis();
System.out.println("ModCount: " + modCounts[j] + " Two fields took " + (finish - start) + " ms");
int sinkPos = 0;
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
sink = new ModuloSinkTokenizer(tokCount[k], modCounts[j]);
stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink);
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
sinkPos += nextToken.getPositionIncrement();
}
//System.out.println("Modulo--------");
stream = sink;
for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
sinkPos += nextToken.getPositionIncrement();
}
}
finish = System.currentTimeMillis();
System.out.println("ModCount: " + modCounts[j] + " Tee fields took " + (finish - start) + " ms");
assertTrue(sinkPos + " does not equal: " + tfPos, sinkPos == tfPos);
}
System.out.println("- End Tokens: " + tokCount[k] + "-----");
}
}
class ModuloTokenFilter extends TokenFilter {
int modCount;
ModuloTokenFilter(TokenStream input, int mc) {
super(input);
modCount = mc;
}
int count = 0;
// return only every modCount-th token
public Token next(final Token reusableToken) throws IOException {
Token nextToken = null;
for (nextToken = input.next(reusableToken);
nextToken != null && count % modCount != 0;
nextToken = input.next(reusableToken)) {
count++;
}
count++;
return nextToken;
}
}
class ModuloSinkTokenizer extends SinkTokenizer {
int count = 0;
int modCount;
ModuloSinkTokenizer(int numToks, int mc) {
modCount = mc;
lst = new ArrayList(1 + numToks / mc); // initial capacity: roughly one slot per modCount-th token
}
public void add(Token t) {
if (t != null && count % modCount == 0) {
super.add(t);
}
count++;
}
}
}

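TeeTokenFilter and SinkTokenizer are superseded in 2.9 by TeeSinkTokenFilter, which captures attribute states instead of Token objects. A minimal sketch of the equivalent tee/sink wiring (treat the exact method names as an assumption drawn from the 2.9 javadocs, not something this commit introduces):

TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())));
TokenStream sink = tee.newSinkTokenStream(); // pass a SinkFilter here to capture only selected tokens
TermAttribute teeTerm = tee.addAttribute(TermAttribute.class);
while (tee.incrementToken()) {
  // consuming the main stream also feeds the sink
}
TermAttribute sinkTerm = sink.addAttribute(TermAttribute.class);
while (sink.incrementToken()) {
  assertTrue(sinkTerm.term().length() > 0);
}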
View File

@@ -1,393 +0,0 @@
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.tokenattributes.*;
/** This class tests some special cases of backwards compatibility when using the new TokenStream API with old analyzers */
public class TestTokenStreamBWComp extends LuceneTestCase {
private static final String doc = "This is the new TokenStream api";
private static final String[] stopwords = new String[] {"is", "the", "this"};
private static final String[] results = new String[] {"new", "tokenstream", "api"};
public static class POSToken extends Token {
public static final int PROPERNOUN = 1;
public static final int NO_NOUN = 2;
private int partOfSpeech;
public void setPartOfSpeech(int pos) {
partOfSpeech = pos;
}
public int getPartOfSpeech() {
return this.partOfSpeech;
}
}
static class PartOfSpeechTaggingFilter extends TokenFilter {
protected PartOfSpeechTaggingFilter(TokenStream input) {
super(input);
}
public Token next() throws IOException {
Token t = input.next();
if (t == null) return null;
POSToken pt = new POSToken();
pt.reinit(t);
if (pt.termLength() > 0) {
if (Character.isUpperCase(pt.termBuffer()[0])) {
pt.setPartOfSpeech(POSToken.PROPERNOUN);
} else {
pt.setPartOfSpeech(POSToken.NO_NOUN);
}
}
return pt;
}
}
static class PartOfSpeechAnnotatingFilter extends TokenFilter {
public final static byte PROPER_NOUN_ANNOTATION = 1;
protected PartOfSpeechAnnotatingFilter(TokenStream input) {
super(input);
}
public Token next() throws IOException {
Token t = input.next();
if (t == null) return null;
if (t instanceof POSToken) {
POSToken pt = (POSToken) t;
if (pt.getPartOfSpeech() == POSToken.PROPERNOUN) {
pt.setPayload(new Payload(new byte[] {PROPER_NOUN_ANNOTATION}));
}
return pt;
} else {
return t;
}
}
}
// test the chain: The one and only term "TokenStream" should be declared as proper noun:
public void testTeeSinkCustomTokenNewAPI() throws IOException {
testTeeSinkCustomToken(0);
}
public void testTeeSinkCustomTokenOldAPI() throws IOException {
testTeeSinkCustomToken(1);
}
public void testTeeSinkCustomTokenVeryOldAPI() throws IOException {
testTeeSinkCustomToken(2);
}
private void testTeeSinkCustomToken(int api) throws IOException {
TokenStream stream = new WhitespaceTokenizer(new StringReader(doc));
stream = new PartOfSpeechTaggingFilter(stream);
stream = new LowerCaseFilter(stream);
stream = new StopFilter(stream, stopwords);
SinkTokenizer sink = new SinkTokenizer();
TokenStream stream1 = new PartOfSpeechAnnotatingFilter(sink);
stream = new TeeTokenFilter(stream, sink);
stream = new PartOfSpeechAnnotatingFilter(stream);
switch (api) {
case 0:
consumeStreamNewAPI(stream);
consumeStreamNewAPI(stream1);
break;
case 1:
consumeStreamOldAPI(stream);
consumeStreamOldAPI(stream1);
break;
case 2:
consumeStreamVeryOldAPI(stream);
consumeStreamVeryOldAPI(stream1);
break;
}
}
// test that caching the special custom POSToken works in all cases
public void testCachingCustomTokenNewAPI() throws IOException {
testCachingCustomToken(0);
}
public void testCachingCustomTokenOldAPI() throws IOException {
testCachingCustomToken(1);
}
public void testCachingCustomTokenVeryOldAPI() throws IOException {
testCachingCustomToken(2);
}
public void testCachingCustomTokenMixed() throws IOException {
testCachingCustomToken(3);
}
private void testCachingCustomToken(int api) throws IOException {
TokenStream stream = new WhitespaceTokenizer(new StringReader(doc));
stream = new PartOfSpeechTaggingFilter(stream);
stream = new LowerCaseFilter(stream);
stream = new StopFilter(stream, stopwords);
stream = new CachingTokenFilter(stream); // <- the caching is done before the annotating!
stream = new PartOfSpeechAnnotatingFilter(stream);
switch (api) {
case 0:
consumeStreamNewAPI(stream);
consumeStreamNewAPI(stream);
break;
case 1:
consumeStreamOldAPI(stream);
consumeStreamOldAPI(stream);
break;
case 2:
consumeStreamVeryOldAPI(stream);
consumeStreamVeryOldAPI(stream);
break;
case 3:
consumeStreamNewAPI(stream);
consumeStreamOldAPI(stream);
consumeStreamVeryOldAPI(stream);
consumeStreamNewAPI(stream);
consumeStreamVeryOldAPI(stream);
break;
}
}
private static void consumeStreamNewAPI(TokenStream stream) throws IOException {
stream.reset();
PayloadAttribute payloadAtt = stream.addAttribute(PayloadAttribute.class);
TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
int i=0;
while (stream.incrementToken()) {
String term = termAtt.term();
Payload p = payloadAtt.getPayload();
if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) {
assertEquals("only TokenStream is a proper noun", "tokenstream", term);
} else {
assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term));
}
assertEquals(results[i], term);
i++;
}
}
private static void consumeStreamOldAPI(TokenStream stream) throws IOException {
stream.reset();
Token reusableToken = new Token();
int i=0;
while ((reusableToken = stream.next(reusableToken)) != null) {
String term = reusableToken.term();
Payload p = reusableToken.getPayload();
if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) {
assertEquals("only TokenStream is a proper noun", "tokenstream", term);
} else {
assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term));
}
assertEquals(results[i], term);
i++;
}
}
private static void consumeStreamVeryOldAPI(TokenStream stream) throws IOException {
stream.reset();
Token token;
int i=0;
while ((token = stream.next()) != null) {
String term = token.term();
Payload p = token.getPayload();
if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) {
assertEquals("only TokenStream is a proper noun", "tokenstream", term);
} else {
assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term));
}
assertEquals(results[i], term);
i++;
}
}
// test if tokenization fails, if only the new API is allowed and an old TokenStream is in the chain
public void testOnlyNewAPI() throws IOException {
TokenStream.setOnlyUseNewAPI(true);
try {
// this should fail with UOE
try {
TokenStream stream = new WhitespaceTokenizer(new StringReader(doc));
stream = new PartOfSpeechTaggingFilter(stream); // <-- this one is evil!
stream = new LowerCaseFilter(stream);
stream = new StopFilter(stream, stopwords);
while (stream.incrementToken());
fail("If only the new API is allowed, this should fail with an UOE");
} catch (UnsupportedOperationException uoe) {
assertEquals((PartOfSpeechTaggingFilter.class.getName()+" does not implement incrementToken() which is needed for onlyUseNewAPI."),uoe.getMessage());
}
// this should pass, as all core token streams support the new API
TokenStream stream = new WhitespaceTokenizer(new StringReader(doc));
stream = new LowerCaseFilter(stream);
stream = new StopFilter(stream, stopwords);
while (stream.incrementToken());
// Test, if all attributes are implemented by their implementation, not Token/TokenWrapper
assertTrue("TermAttribute is not implemented by TermAttributeImpl",
stream.addAttribute(TermAttribute.class) instanceof TermAttributeImpl);
assertTrue("OffsetAttribute is not implemented by OffsetAttributeImpl",
stream.addAttribute(OffsetAttribute.class) instanceof OffsetAttributeImpl);
assertTrue("FlagsAttribute is not implemented by FlagsAttributeImpl",
stream.addAttribute(FlagsAttribute.class) instanceof FlagsAttributeImpl);
assertTrue("PayloadAttribute is not implemented by PayloadAttributeImpl",
stream.addAttribute(PayloadAttribute.class) instanceof PayloadAttributeImpl);
assertTrue("PositionIncrementAttribute is not implemented by PositionIncrementAttributeImpl",
stream.addAttribute(PositionIncrementAttribute.class) instanceof PositionIncrementAttributeImpl);
assertTrue("TypeAttribute is not implemented by TypeAttributeImpl",
stream.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl);
// try to call old API, this should fail
try {
stream.reset();
Token reusableToken = new Token();
while ((reusableToken = stream.next(reusableToken)) != null);
fail("If only the new API is allowed, this should fail with an UOE");
} catch (UnsupportedOperationException uoe) {
assertEquals("This TokenStream only supports the new Attributes API.", uoe.getMessage());
}
try {
stream.reset();
while (stream.next() != null);
fail("If only the new API is allowed, this should fail with an UOE");
} catch (UnsupportedOperationException uoe) {
assertEquals("This TokenStream only supports the new Attributes API.", uoe.getMessage());
}
// Test if the wrapper API (onlyUseNewAPI==false) uses TokenWrapper
// as attribute instance.
// TokenWrapper encapsulates a Token instance that can be exchanged
// for another Token instance without changing the AttributeImpl instance
// itself.
TokenStream.setOnlyUseNewAPI(false);
stream = new WhitespaceTokenizer(new StringReader(doc));
assertTrue("TermAttribute is not implemented by TokenWrapper",
stream.addAttribute(TermAttribute.class) instanceof TokenWrapper);
assertTrue("OffsetAttribute is not implemented by TokenWrapper",
stream.addAttribute(OffsetAttribute.class) instanceof TokenWrapper);
assertTrue("FlagsAttribute is not implemented by TokenWrapper",
stream.addAttribute(FlagsAttribute.class) instanceof TokenWrapper);
assertTrue("PayloadAttribute is not implemented by TokenWrapper",
stream.addAttribute(PayloadAttribute.class) instanceof TokenWrapper);
assertTrue("PositionIncrementAttribute is not implemented by TokenWrapper",
stream.addAttribute(PositionIncrementAttribute.class) instanceof TokenWrapper);
assertTrue("TypeAttribute is not implemented by TokenWrapper",
stream.addAttribute(TypeAttribute.class) instanceof TokenWrapper);
} finally {
TokenStream.setOnlyUseNewAPI(false);
}
}
public void testOverridesAny() throws Exception {
try {
TokenStream stream = new WhitespaceTokenizer(new StringReader(doc));
stream = new TokenFilter(stream) {
// we implement nothing, only un-abstract it
};
stream = new LowerCaseFilter(stream);
stream = new StopFilter(stream, stopwords);
while (stream.incrementToken());
fail("One TokenFilter does not override any of the required methods, so it should fail.");
} catch (UnsupportedOperationException uoe) {
assertTrue("invalid UOE message", uoe.getMessage().endsWith("does not implement any of incrementToken(), next(Token), next()."));
}
}
public void testMixedOldApiConsumer() throws Exception {
// WhitespaceTokenizer is using incrementToken() API:
TokenStream stream = new WhitespaceTokenizer(new StringReader("foo bar moo maeh"));
Token foo = new Token();
foo = stream.next(foo);
Token bar = stream.next();
assertEquals("foo", foo.term());
assertEquals("bar", bar.term());
Token moo = stream.next(foo);
assertEquals("moo", moo.term());
assertEquals("private 'bar' term should still be valid", "bar", bar.term());
// and now we also use incrementToken()... (very bad, but should work)
TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("maeh", termAtt.term());
assertEquals("private 'bar' term should still be valid", "bar", bar.term());
}
/*
* old api that cycles thru foo, bar, meh
*/
private class RoundRobinOldAPI extends TokenStream {
int count = 0;
String terms[] = { "foo", "bar", "meh" };
public Token next(Token reusableToken) throws IOException {
reusableToken.setTermBuffer(terms[count % terms.length]);
count++;
return reusableToken;
}
}
public void testMixedOldApiConsumer2() throws Exception {
// RoundRobinOldAPI is using TokenStream(next)
TokenStream stream = new RoundRobinOldAPI();
TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
Token bar = stream.next();
assertEquals("foo", termAtt.term());
assertEquals("bar", bar.term());
assertTrue(stream.incrementToken());
assertEquals("meh", termAtt.term());
assertEquals("private 'bar' term should still be valid", "bar", bar.term());
Token foo = stream.next();
assertEquals("the term attribute should still be the same", "meh", termAtt.term());
assertEquals("foo", foo.term());
assertEquals("private 'bar' term should still be valid", "bar", bar.term());
}
}

View File

@@ -71,7 +71,7 @@ public class TestBinaryDocument extends LuceneTestCase
writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, false);
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);
@@ -117,7 +117,7 @@ public class TestBinaryDocument extends LuceneTestCase
writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, false);
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);

View File

@@ -1,198 +0,0 @@
package org.apache.lucene.document;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.TimeZone;
import java.util.Locale;
import org.apache.lucene.util.LocalizedTestCase;
import org.apache.lucene.util.LuceneTestCase;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class TestDateTools extends LocalizedTestCase {
public void testStringToDate() throws ParseException {
Date d = null;
d = DateTools.stringToDate("2004");
assertEquals("2004-01-01 00:00:00:000", isoFormat(d));
d = DateTools.stringToDate("20040705");
assertEquals("2004-07-05 00:00:00:000", isoFormat(d));
d = DateTools.stringToDate("200407050910");
assertEquals("2004-07-05 09:10:00:000", isoFormat(d));
d = DateTools.stringToDate("20040705091055990");
assertEquals("2004-07-05 09:10:55:990", isoFormat(d));
try {
d = DateTools.stringToDate("97"); // no date
fail();
} catch(ParseException e) { /* expected exception */ }
try {
d = DateTools.stringToDate("200401011235009999"); // no date
fail();
} catch(ParseException e) { /* expected exception */ }
try {
d = DateTools.stringToDate("aaaa"); // no date
fail();
} catch(ParseException e) { /* expected exception */ }
}
public void testStringtoTime() throws ParseException {
long time = DateTools.stringToTime("197001010000");
Calendar cal = new GregorianCalendar();
cal.set(1970, 0, 1, // year=1970, month=january, day=1
0, 0, 0); // hour, minute, second
cal.set(Calendar.MILLISECOND, 0);
cal.setTimeZone(TimeZone.getTimeZone("GMT"));
assertEquals(cal.getTime().getTime(), time);
cal.set(1980, 1, 2, // year=1980, month=february, day=2
11, 5, 0); // hour, minute, second
cal.set(Calendar.MILLISECOND, 0);
time = DateTools.stringToTime("198002021105");
assertEquals(cal.getTime().getTime(), time);
}
public void testDateAndTimetoString() throws ParseException {
Calendar cal = new GregorianCalendar();
cal.setTimeZone(TimeZone.getTimeZone("GMT"));
cal.set(2004, 1, 3, // year=2004, month=february(!), day=3
22, 8, 56); // hour, minute, second
cal.set(Calendar.MILLISECOND, 333);
String dateString;
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR);
assertEquals("2004", dateString);
assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH);
assertEquals("200402", dateString);
assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY);
assertEquals("20040203", dateString);
assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR);
assertEquals("2004020322", dateString);
assertEquals("2004-02-03 22:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE);
assertEquals("200402032208", dateString);
assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND);
assertEquals("20040203220856", dateString);
assertEquals("2004-02-03 22:08:56:000", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND);
assertEquals("20040203220856333", dateString);
assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString)));
// date before 1970:
cal.set(1961, 2, 5, // year=1961, month=march(!), day=5
23, 9, 51); // hour, minute, second
cal.set(Calendar.MILLISECOND, 444);
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND);
assertEquals("19610305230951444", dateString);
assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString)));
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR);
assertEquals("1961030523", dateString);
assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString)));
// timeToString:
cal.set(1970, 0, 1, // year=1970, month=january, day=1
0, 0, 0); // hour, minute, second
cal.set(Calendar.MILLISECOND, 0);
dateString = DateTools.timeToString(cal.getTime().getTime(),
DateTools.Resolution.MILLISECOND);
assertEquals("19700101000000000", dateString);
cal.set(1970, 0, 1, // year=1970, month=january, day=1
1, 2, 3); // hour, minute, second
cal.set(Calendar.MILLISECOND, 0);
dateString = DateTools.timeToString(cal.getTime().getTime(),
DateTools.Resolution.MILLISECOND);
assertEquals("19700101010203000", dateString);
}
public void testRound() {
Calendar cal = new GregorianCalendar();
cal.setTimeZone(TimeZone.getTimeZone("GMT"));
cal.set(2004, 1, 3, // year=2004, month=february(!), day=3
22, 8, 56); // hour, minute, second
cal.set(Calendar.MILLISECOND, 333);
Date date = cal.getTime();
assertEquals("2004-02-03 22:08:56:333", isoFormat(date));
Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR);
assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear));
Date dateMonth = DateTools.round(date, DateTools.Resolution.MONTH);
assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth));
Date dateDay = DateTools.round(date, DateTools.Resolution.DAY);
assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay));
Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR);
assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour));
Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE);
assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute));
Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND);
assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond));
Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND);
assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond));
// long parameter:
long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR);
assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong)));
long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND);
assertEquals("2004-02-03 22:08:56:333", isoFormat(new Date(dateMillisecondLong)));
}
private String isoFormat(Date date) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US);
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
return sdf.format(date);
}
public void testDateToolsUTC() throws Exception {
// Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London
long time = 1130630400;
try {
TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London"));
String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE);
String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE);
assertFalse("different times", d1.equals(d2));
assertEquals("midnight", DateTools.stringToTime(d1), time*1000);
assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000);
} finally {
TimeZone.setDefault(null);
}
}
}

View File

@@ -161,7 +161,7 @@ public class TestDocument extends LuceneTestCase
writer.addDocument(makeDocumentWithFields());
writer.close();
- Searcher searcher = new IndexSearcher(dir);
+ Searcher searcher = new IndexSearcher(dir, true);
// search for something that does exists
Query query = new TermQuery(new Term("keyword", "test1"));
@@ -236,7 +236,7 @@ public class TestDocument extends LuceneTestCase
writer.addDocument(doc);
writer.close();
- Searcher searcher = new IndexSearcher(dir);
+ Searcher searcher = new IndexSearcher(dir, true);
Query query = new TermQuery(new Term("keyword", "test"));

View File

@@ -1,82 +0,0 @@
package org.apache.lucene.document;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
public class TestNumberTools extends LuceneTestCase {
public void testNearZero() {
for (int i = -100; i <= 100; i++) {
for (int j = -100; j <= 100; j++) {
subtestTwoLongs(i, j);
}
}
}
public void testMax() {
// make sure the constants convert to their equivalents
assertEquals(Long.MAX_VALUE, NumberTools
.stringToLong(NumberTools.MAX_STRING_VALUE));
assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools
.longToString(Long.MAX_VALUE));
// test near MAX, too
for (long l = Long.MAX_VALUE; l > Long.MAX_VALUE - 10000; l--) {
subtestTwoLongs(l, l - 1);
}
}
public void testMin() {
// make sure the constants convert to their equivalents
assertEquals(Long.MIN_VALUE, NumberTools
.stringToLong(NumberTools.MIN_STRING_VALUE));
assertEquals(NumberTools.MIN_STRING_VALUE, NumberTools
.longToString(Long.MIN_VALUE));
// test near MIN, too
for (long l = Long.MIN_VALUE; l < Long.MIN_VALUE + 10000; l++) {
subtestTwoLongs(l, l + 1);
}
}
private static void subtestTwoLongs(long i, long j) {
// convert to strings
String a = NumberTools.longToString(i);
String b = NumberTools.longToString(j);
// are they the right length?
assertEquals(NumberTools.STR_SIZE, a.length());
assertEquals(NumberTools.STR_SIZE, b.length());
// are they the right order?
if (i < j) {
assertTrue(a.compareTo(b) < 0);
} else if (i > j) {
assertTrue(a.compareTo(b) > 0);
} else {
assertEquals(a, b);
}
// can we convert them back to longs?
long i2 = NumberTools.stringToLong(a);
long j2 = NumberTools.stringToLong(b);
assertEquals(i, i2);
assertEquals(j, j2);
}
}

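NumberTools' padded-string encoding of longs is deprecated in favor of the numeric field support added in 2.9. A hedged sketch of indexing and range-querying the same kind of value with NumericField and NumericRangeQuery (the field name and values are only examples; writer and searcher come from the usual test setup):

Document doc = new Document();
doc.add(new NumericField("size", Field.Store.YES, true).setLongValue(1234L));
writer.addDocument(doc);
// later, at search time:
Query q = NumericRangeQuery.newLongRange("size", 1000L, 2000L, true, true);
TopDocs hits = searcher.search(q, 10);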
View File

@@ -358,7 +358,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
setUpDirs(dir, aux);
- IndexReader reader = IndexReader.open(aux);
+ IndexReader reader = IndexReader.open(aux, false);
for (int i = 0; i < 20; i++) {
reader.deleteDocument(i);
}
@@ -396,14 +396,14 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
assertEquals(3, writer.getSegmentCount());
writer.close();
- IndexReader reader = IndexReader.open(aux);
+ IndexReader reader = IndexReader.open(aux, false);
for (int i = 0; i < 27; i++) {
reader.deleteDocument(i);
}
assertEquals(3, reader.numDocs());
reader.close();
- reader = IndexReader.open(aux2);
+ reader = IndexReader.open(aux2, false);
for (int i = 0; i < 8; i++) {
reader.deleteDocument(i);
}
@@ -449,7 +449,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
}
private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, true);
assertEquals(numDocs, reader.maxDoc());
assertEquals(numDocs, reader.numDocs());
reader.close();
@@ -457,7 +457,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
private void verifyTermDocs(Directory dir, Term term, int numDocs)
throws IOException {
- IndexReader reader = IndexReader.open(dir);
+ IndexReader reader = IndexReader.open(dir, true);
TermDocs termDocs = reader.termDocs(term);
int count = 0;
while (termDocs.next())

View File

@@ -111,7 +111,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
}
public void doWork() throws Throwable {
- IndexReader r = IndexReader.open(directory);
+ IndexReader r = IndexReader.open(directory, true);
assertEquals(100, r.numDocs());
r.close();
}
@@ -138,7 +138,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
}
writer.commit();
- IndexReader r = IndexReader.open(directory);
+ IndexReader r = IndexReader.open(directory, true);
assertEquals(100, r.numDocs());
r.close();

View File

@@ -1,543 +0,0 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
/*
Verify we can read the pre-2.1 file format, do searches
against it, and add documents to it.
*/
public class TestBackwardsCompatibility extends LuceneTestCase
{
// Uncomment these cases & run them on an older Lucene
// version, to generate an index to test backwards
// compatibility. Then, cd to build/test/index.cfs and
// run "zip index.<VERSION>.cfs.zip *"; cd to
// build/test/index.nocfs and run "zip
// index.<VERSION>.nocfs.zip *". Then move those 2 zip
// files to your trunk checkout and add them to the
// oldNames array.
/*
public void testCreatePreLocklessCFS() throws IOException {
createIndex("index.cfs", true);
}
public void testCreatePreLocklessNoCFS() throws IOException {
createIndex("index.nocfs", false);
}
*/
/* Unzips dirName + ".zip" --> dirName, removing dirName
first */
public void unzip(String zipName, String destDirName) throws IOException {
Enumeration entries;
ZipFile zipFile;
zipFile = new ZipFile(zipName + ".zip");
entries = zipFile.entries();
String dirName = fullDir(destDirName);
File fileDir = new File(dirName);
rmDir(destDirName);
fileDir.mkdir();
while (entries.hasMoreElements()) {
ZipEntry entry = (ZipEntry) entries.nextElement();
InputStream in = zipFile.getInputStream(entry);
OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName())));
byte[] buffer = new byte[8192];
int len;
while((len = in.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
in.close();
out.close();
}
zipFile.close();
}
public void testCreateCFS() throws IOException {
String dirName = "testindex.cfs";
createIndex(dirName, true);
rmDir(dirName);
}
public void testCreateNoCFS() throws IOException {
String dirName = "testindex.nocfs";
createIndex(dirName, true);
rmDir(dirName);
}
final String[] oldNames = {"19.cfs",
"19.nocfs",
"20.cfs",
"20.nocfs",
"21.cfs",
"21.nocfs",
"22.cfs",
"22.nocfs",
"23.cfs",
"23.nocfs",
"24.cfs",
"24.nocfs",
};
public void testOptimizeOldIndex() throws IOException {
for(int i=0;i<oldNames.length;i++) {
String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
unzip(dirName, oldNames[i]);
String fullPath = fullDir(oldNames[i]);
Directory dir = FSDirectory.open(new File(fullPath));
IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
_TestUtil.checkIndex(dir);
dir.close();
rmDir(oldNames[i]);
}
}
public void testSearchOldIndex() throws IOException {
for(int i=0;i<oldNames.length;i++) {
String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
unzip(dirName, oldNames[i]);
searchIndex(oldNames[i], oldNames[i]);
rmDir(oldNames[i]);
}
}
public void testIndexOldIndexNoAdds() throws IOException {
for(int i=0;i<oldNames.length;i++) {
String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
unzip(dirName, oldNames[i]);
changeIndexNoAdds(oldNames[i], true);
rmDir(oldNames[i]);
unzip(dirName, oldNames[i]);
changeIndexNoAdds(oldNames[i], false);
rmDir(oldNames[i]);
}
}
public void testIndexOldIndex() throws IOException {
for(int i=0;i<oldNames.length;i++) {
String dirName = "src/test/org/apache/lucene/index/index." + oldNames[i];
unzip(dirName, oldNames[i]);
changeIndexWithAdds(oldNames[i], true);
rmDir(oldNames[i]);
unzip(dirName, oldNames[i]);
changeIndexWithAdds(oldNames[i], false);
rmDir(oldNames[i]);
}
}
private void testHits(ScoreDoc[] hits, int expectedCount, IndexReader reader) throws IOException {
final int hitCount = hits.length;
assertEquals("wrong number of hits", expectedCount, hitCount);
for(int i=0;i<hitCount;i++) {
reader.document(hits[i].doc);
reader.getTermFreqVectors(hits[i].doc);
}
}
public void searchIndex(String dirName, String oldName) throws IOException {
//QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer());
//Query query = parser.parse("handle:1");
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
IndexSearcher searcher = new IndexSearcher(dir);
IndexReader reader = searcher.getIndexReader();
_TestUtil.checkIndex(dir);
for(int i=0;i<35;i++) {
if (!reader.isDeleted(i)) {
Document d = reader.document(i);
List fields = d.getFields();
if (!oldName.startsWith("19.") &&
!oldName.startsWith("20.") &&
!oldName.startsWith("21.") &&
!oldName.startsWith("22.")) {
if (d.getField("content3") == null) {
assertEquals(5, fields.size());
Field f = (Field) d.getField("id");
assertEquals(""+i, f.stringValue());
f = (Field) d.getField("utf8");
assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
f = (Field) d.getField("autf8");
assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
f = (Field) d.getField("content2");
assertEquals("here is more content with aaa aaa aaa", f.stringValue());
f = (Field) d.getField("fie\u2C77ld");
assertEquals("field with non-ascii name", f.stringValue());
}
}
} else
// Only ID 7 is deleted
assertEquals(7, i);
}
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
// First document should be #21 since its norm was
// increased:
Document d = searcher.doc(hits[0].doc);
assertEquals("didn't get the right document first", "21", d.get("id"));
testHits(hits, 34, searcher.getIndexReader());
if (!oldName.startsWith("19.") &&
!oldName.startsWith("20.") &&
!oldName.startsWith("21.") &&
!oldName.startsWith("22.")) {
// Test on indices >= 2.3
hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs;
assertEquals(34, hits.length);
hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs;
assertEquals(34, hits.length);
hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs;
assertEquals(34, hits.length);
}
searcher.close();
dir.close();
}
private int compare(String name, String v) {
int v0 = Integer.parseInt(name.substring(0, 2));
int v1 = Integer.parseInt(v);
return v0 - v1;
}
/* Open pre-lockless index, add docs, do a delete &
* setNorm, and search */
public void changeIndexWithAdds(String dirName, boolean autoCommit) throws IOException {
String origDirName = dirName;
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
// open writer
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
// add 10 docs
for(int i=0;i<10;i++) {
addDoc(writer, 35+i);
}
// make sure writer sees right total -- writer seems not to know about deletes in .del?
final int expected;
if (compare(origDirName, "24") < 0) {
expected = 45;
} else {
expected = 46;
}
assertEquals("wrong doc count", expected, writer.docCount());
writer.close();
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
Document d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
testHits(hits, 44, searcher.getIndexReader());
searcher.close();
// make sure we can do delete & setNorm against this
// pre-lockless segment:
IndexReader reader = IndexReader.open(dir);
Term searchTerm = new Term("id", "6");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("wrong delete count", 1, delCount);
reader.setNorm(22, "content", (float) 2.0);
reader.close();
// make sure they "took":
searcher = new IndexSearcher(dir);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
testHits(hits, 43, searcher.getIndexReader());
searcher.close();
// optimize
writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
writer.optimize();
writer.close();
searcher = new IndexSearcher(dir);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
d = searcher.doc(hits[0].doc);
testHits(hits, 43, searcher.getIndexReader());
assertEquals("wrong first document", "22", d.get("id"));
searcher.close();
dir.close();
}
/* Open pre-lockless index, do a delete &
* setNorm, and search */
public void changeIndexNoAdds(String dirName, boolean autoCommit) throws IOException {
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
Document d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
searcher.close();
// make sure we can do a delete & setNorm against this
// pre-lockless segment:
IndexReader reader = IndexReader.open(dir);
Term searchTerm = new Term("id", "6");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("wrong delete count", 1, delCount);
reader.setNorm(22, "content", (float) 2.0);
reader.close();
// make sure they "took":
searcher = new IndexSearcher(dir);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
testHits(hits, 33, searcher.getIndexReader());
searcher.close();
// optimize
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
writer.optimize();
writer.close();
searcher = new IndexSearcher(dir);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
testHits(hits, 33, searcher.getIndexReader());
searcher.close();
dir.close();
}
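// Builds the source index for the back-compat tests: 35 docs plus one no-prox doc, one deleted doc (.del file) and one changed norm (.s0 file):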
public void createIndex(String dirName, boolean doCFS) throws IOException {
rmDir(dirName);
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
for(int i=0;i<35;i++) {
addDoc(writer, i);
}
assertEquals("wrong doc count", 35, writer.docCount());
writer.close();
// open fresh writer so we get no prx file in the added segment
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
addNoProxDoc(writer);
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
// Set one norm so we get a .s0 file:
reader.setNorm(21, "content", (float) 1.5);
reader.close();
}
/* Verifies that the expected file names were produced */
public void testExactFileNames() throws IOException {
for(int pass=0;pass<2;pass++) {
String outputDir = "lucene.backwardscompat0.index";
rmDir(outputDir);
try {
Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
boolean autoCommit = 0 == pass;
IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
writer.setRAMBufferSizeMB(16.0);
for(int i=0;i<35;i++) {
addDoc(writer, i);
}
assertEquals("wrong doc count", 35, writer.docCount());
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
// Set one norm so we get a .s0 file:
reader.setNorm(21, "content", (float) 1.5);
reader.close();
// The numbering of fields can vary depending on which
// JRE is in use. On some JREs we see content bound to
// field 0; on others, field 1. So, here we have to
// figure out which field number corresponds to
// "content", and then set our expected file names below
// accordingly:
CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs");
FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm");
int contentFieldIndex = -1;
for(int i=0;i<fieldInfos.size();i++) {
FieldInfo fi = fieldInfos.fieldInfo(i);
if (fi.name.equals("content")) {
contentFieldIndex = i;
break;
}
}
cfsReader.close();
assertTrue("could not locate the 'content' field number in the _2.cfs segment", contentFieldIndex != -1);
// Now verify file names:
String[] expected;
expected = new String[] {"_0.cfs",
"_0_1.del",
"_0_1.s" + contentFieldIndex,
"segments_3",
"segments.gen"};
String[] actual = dir.listAll();
Arrays.sort(expected);
Arrays.sort(actual);
if (!Arrays.equals(expected, actual)) {
fail("incorrect filenames in index: expected:\n " + asString(expected) + "\n actual:\n " + asString(actual));
}
dir.close();
} finally {
rmDir(outputDir);
}
}
}
private String asString(String[] l) {
String s = "";
for(int i=0;i<l.length;i++) {
if (i > 0) {
s += "\n ";
}
s += l[i];
}
return s;
}
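// Adds a document with the fields (including UTF-8 surrogate pairs and a non-ASCII field name) that searchIndex later checks: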
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
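// Adds a document whose fields set omitTf, so the added segment is written without term frequencies or positions (no prx file):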
private void addNoProxDoc(IndexWriter writer) throws IOException {
Document doc = new Document();
Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED);
f.setOmitTf(true);
doc.add(f);
f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO);
f.setOmitTf(true);
doc.add(f);
writer.addDocument(doc);
}
private void rmDir(String dir) throws IOException {
File fileDir = new File(fullDir(dir));
if (fileDir.exists()) {
File[] files = fileDir.listFiles();
if (files != null) {
for (int i = 0; i < files.length; i++) {
files[i].delete();
}
}
fileDir.delete();
}
}
public static String fullDir(String dirName) throws IOException {
return new File(System.getProperty("tempDir"), dirName).getCanonicalPath();
}
}

View File

@ -43,7 +43,7 @@ public class TestCheckIndex extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(5);
reader.close();
@ -90,6 +90,7 @@ public class TestCheckIndex extends LuceneTestCase {
assertTrue(checker.checkIndex(onlySegments).clean == true);
}
/* Does not work, because compilation puts final field from Constants of 2.9 into class file:
public void testLuceneConstantVersion() throws IOException {
// common-build.xml sets lucene.version
final String version = System.getProperty("lucene.version");
@ -97,5 +98,5 @@ public class TestCheckIndex extends LuceneTestCase {
assertTrue(version.equals(Constants.LUCENE_MAIN_VERSION+"-dev") ||
version.equals(Constants.LUCENE_MAIN_VERSION));
assertTrue(Constants.LUCENE_VERSION.startsWith(version));
}
}*/
}

View File

@ -88,7 +88,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
assertEquals(200, reader.numDocs());
reader.close();
directory.close();
@ -131,7 +131,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
// Verify that we did not lose any deletes...
assertEquals(450, reader.numDocs());
reader.close();
@ -207,7 +207,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
writer.close(false);
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
assertEquals((1+iter)*182, reader.numDocs());
reader.close();

View File

@ -61,7 +61,7 @@ public class TestCrash extends LuceneTestCase {
IndexWriter writer = initIndex();
MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
crash(writer);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertTrue(reader.numDocs() < 157);
}
@ -73,7 +73,7 @@ public class TestCrash extends LuceneTestCase {
writer = initIndex(dir);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertTrue(reader.numDocs() < 314);
}
@ -94,7 +94,7 @@ public class TestCrash extends LuceneTestCase {
dir.fileLength(l[i]) + " bytes");
*/
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertTrue(reader.numDocs() >= 157);
}
@ -113,7 +113,7 @@ public class TestCrash extends LuceneTestCase {
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertEquals(157, reader.numDocs());
}
@ -132,7 +132,7 @@ public class TestCrash extends LuceneTestCase {
for(int i=0;i<l.length;i++)
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertEquals(157, reader.numDocs());
}
@ -142,7 +142,7 @@ public class TestCrash extends LuceneTestCase {
MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
writer.close(false);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(3);
dir.crash();
@ -153,7 +153,7 @@ public class TestCrash extends LuceneTestCase {
for(int i=0;i<l.length;i++)
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals(157, reader.numDocs());
}
@ -163,7 +163,7 @@ public class TestCrash extends LuceneTestCase {
MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
writer.close(false);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(3);
reader.close();
@ -175,7 +175,7 @@ public class TestCrash extends LuceneTestCase {
for(int i=0;i<l.length;i++)
System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
*/
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals(156, reader.numDocs());
}
}

View File

@ -74,7 +74,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
public void onCommit(List commits) throws IOException {
IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
r.close();
verifyCommitOrder(commits);
@ -243,7 +243,7 @@ public class TestDeletionPolicy extends LuceneTestCase
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
while(gen > 0) {
try {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
reader.close();
fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
@ -314,7 +314,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Make sure we can open a reader on each commit:
while(it.hasNext()) {
IndexCommit commit = (IndexCommit) it.next();
IndexReader r = IndexReader.open(commit, null);
IndexReader r = IndexReader.open(commit, null, false);
r.close();
}
@ -323,7 +323,7 @@ public class TestDeletionPolicy extends LuceneTestCase
dir.deleteFile(IndexFileNames.SEGMENTS_GEN);
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
while(gen > 0) {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen));
gen--;
@ -392,7 +392,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Should undo our rollback:
writer.rollback();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
// Still optimized, still 11 docs
assertTrue(r.isOptimized());
assertEquals(11, r.numDocs());
@ -406,7 +406,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Now 8 because we made another commit
assertEquals(8, IndexReader.listCommits(dir).size());
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
// Not optimized because we rolled it back, and now only
// 10 docs
assertTrue(!r.isOptimized());
@ -418,7 +418,7 @@ public class TestDeletionPolicy extends LuceneTestCase
writer.optimize();
writer.close();
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
assertTrue(r.isOptimized());
assertEquals(10, r.numDocs());
r.close();
@ -430,7 +430,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Reader still sees optimized index, because writer
// opened on the prior commit has not yet committed:
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
assertTrue(r.isOptimized());
assertEquals(10, r.numDocs());
r.close();
@ -438,7 +438,7 @@ public class TestDeletionPolicy extends LuceneTestCase
writer.close();
// Now reader sees unoptimized index:
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
assertTrue(!r.isOptimized());
assertEquals(10, r.numDocs());
r.close();
@ -483,7 +483,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Simplistic check: just verify the index is in fact
// readable:
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
@ -531,7 +531,7 @@ public class TestDeletionPolicy extends LuceneTestCase
long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
for(int i=0;i<N+1;i++) {
try {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
reader.close();
if (i == N) {
fail("should have failed on commits prior to last " + N);
@ -581,7 +581,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
// this is a commit when autoCommit=false:
writer.close();
IndexReader reader = IndexReader.open(dir, policy);
IndexReader reader = IndexReader.open(dir, policy, false);
reader.deleteDocument(3*i+1);
reader.setNorm(4*i+1, "content", 2.0F);
IndexSearcher searcher = new IndexSearcher(reader);
@ -601,7 +601,7 @@ public class TestDeletionPolicy extends LuceneTestCase
if (!autoCommit)
assertEquals(2*(N+2)-1, policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(176, hits.length);
@ -614,7 +614,7 @@ public class TestDeletionPolicy extends LuceneTestCase
for(int i=0;i<N+1;i++) {
try {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// Work backwards in commits on what the expected
// count should be. Only check this in the
@ -684,7 +684,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
// this is a commit when autoCommit=false:
writer.close();
IndexReader reader = IndexReader.open(dir, policy);
IndexReader reader = IndexReader.open(dir, policy, false);
reader.deleteDocument(3);
reader.setNorm(5, "content", 2.0F);
IndexSearcher searcher = new IndexSearcher(reader);
@ -704,7 +704,7 @@ public class TestDeletionPolicy extends LuceneTestCase
if (!autoCommit)
assertEquals(3*(N+1), policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
@ -717,7 +717,7 @@ public class TestDeletionPolicy extends LuceneTestCase
for(int i=0;i<N+1;i++) {
try {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// Work backwards in commits on what the expected
// count should be. Only check this in the

View File

@ -54,7 +54,7 @@ public class TestDirectoryReader extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
IndexReader reader;
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertTrue(reader instanceof DirectoryReader);
assertTrue(dir != null);
@ -133,7 +133,7 @@ public class TestDirectoryReader extends LuceneTestCase {
addDoc(ramDir1, "test foo", true);
RAMDirectory ramDir2=new RAMDirectory();
addDoc(ramDir2, "test blah", true);
IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2)};
IndexReader[] readers = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false)};
MultiReader mr = new MultiReader(readers);
assertTrue(mr.isCurrent()); // just opened, must be current
addDoc(ramDir1, "more text", false);
@ -157,8 +157,8 @@ public class TestDirectoryReader extends LuceneTestCase {
RAMDirectory ramDir3=new RAMDirectory();
addDoc(ramDir3, "test wow", true);
IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir3)};
IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1), IndexReader.open(ramDir2), IndexReader.open(ramDir3)};
IndexReader[] readers1 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir3, false)};
IndexReader[] readers2 = new IndexReader[]{IndexReader.open(ramDir1, false), IndexReader.open(ramDir2, false), IndexReader.open(ramDir3, false)};
MultiReader mr2 = new MultiReader(readers1);
MultiReader mr3 = new MultiReader(readers2);

View File

@ -265,7 +265,7 @@ public class TestDocumentWriter extends BaseTokenStreamTestCase {
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// f1
TermFreqVector tfv1 = reader.getTermFreqVector(0, "f1");
assertNotNull(tfv1);

View File

@ -398,7 +398,7 @@ public class TestFieldsReader extends LuceneTestCase {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
FaultyIndexInput.doFail = true;

View File

@ -110,7 +110,7 @@ public class TestFilterIndexReader extends LuceneTestCase {
writer.close();
IndexReader reader = new TestReader(IndexReader.open(directory));
IndexReader reader = new TestReader(IndexReader.open(directory, true));
assertTrue(reader.isOptimized());

View File

@ -54,7 +54,7 @@ public class TestIndexFileDeleter extends LuceneTestCase
writer.close();
// Delete one doc so we get a .del file:
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);

View File

@ -1,281 +0,0 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import java.io.File;
import java.io.IOException;
import java.util.EmptyStackException;
import java.util.Random;
import java.util.Stack;
/**
* Tests for the "IndexModifier" class, including accesses from two threads at the
* same time.
*
* @deprecated
*/
public class TestIndexModifier extends LuceneTestCase {
private int docCount = 0;
private final Term allDocTerm = new Term("all", "x");
public void testIndex() throws IOException {
Directory ramDir = new RAMDirectory();
IndexModifier i = new IndexModifier(ramDir, new StandardAnalyzer(), true);
i.addDocument(getDoc());
assertEquals(1, i.docCount());
i.flush();
i.addDocument(getDoc(), new SimpleAnalyzer());
assertEquals(2, i.docCount());
i.optimize();
assertEquals(2, i.docCount());
i.flush();
i.deleteDocument(0);
assertEquals(1, i.docCount());
i.flush();
assertEquals(1, i.docCount());
i.addDocument(getDoc());
i.addDocument(getDoc());
i.flush();
// depend on merge policy - assertEquals(3, i.docCount());
i.deleteDocuments(allDocTerm);
assertEquals(0, i.docCount());
i.optimize();
assertEquals(0, i.docCount());
// Lucene defaults:
assertNull(i.getInfoStream());
assertTrue(i.getUseCompoundFile());
assertEquals(IndexWriter.DISABLE_AUTO_FLUSH, i.getMaxBufferedDocs());
assertEquals(10000, i.getMaxFieldLength());
assertEquals(10, i.getMergeFactor());
// test setting properties:
i.setMaxBufferedDocs(100);
i.setMergeFactor(25);
i.setMaxFieldLength(250000);
i.addDocument(getDoc());
i.setUseCompoundFile(false);
i.flush();
assertEquals(100, i.getMaxBufferedDocs());
assertEquals(25, i.getMergeFactor());
assertEquals(250000, i.getMaxFieldLength());
assertFalse(i.getUseCompoundFile());
// test setting properties when internally the reader is opened:
i.deleteDocuments(allDocTerm);
i.setMaxBufferedDocs(100);
i.setMergeFactor(25);
i.setMaxFieldLength(250000);
i.addDocument(getDoc());
i.setUseCompoundFile(false);
i.optimize();
assertEquals(100, i.getMaxBufferedDocs());
assertEquals(25, i.getMergeFactor());
assertEquals(250000, i.getMaxFieldLength());
assertFalse(i.getUseCompoundFile());
i.close();
try {
i.docCount();
fail();
} catch (IllegalStateException e) {
// expected exception
}
}
public void testExtendedIndex() throws IOException {
Directory ramDir = new RAMDirectory();
PowerIndex powerIndex = new PowerIndex(ramDir, new StandardAnalyzer(), true);
powerIndex.addDocument(getDoc());
powerIndex.addDocument(getDoc());
powerIndex.addDocument(getDoc());
powerIndex.addDocument(getDoc());
powerIndex.addDocument(getDoc());
powerIndex.flush();
assertEquals(5, powerIndex.docFreq(allDocTerm));
powerIndex.close();
}
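// Builds a document with an increasing "body" counter and the "all" marker field used for bulk deletes: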
private Document getDoc() {
Document doc = new Document();
doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.NOT_ANALYZED));
docCount++;
return doc;
}
public void testIndexWithThreads() throws Exception {
testIndexInternal(0);
testIndexInternal(10);
testIndexInternal(50);
}
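// Runs two IndexThreads against a shared on-disk IndexModifier, then checks the final document count and double-close behaviour: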
private void testIndexInternal(int maxWait) throws Exception {
final boolean create = true;
//Directory rd = new RAMDirectory();
// work on disk to make sure potential lock problems are tested:
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
throw new IOException("java.io.tmpdir undefined, cannot run test");
File indexDir = new File(tempDir, "lucenetestindex");
Directory rd = FSDirectory.open(indexDir);
IndexThread.id = 0;
IndexThread.idStack.clear();
IndexModifier index = new IndexModifier(rd, new StandardAnalyzer(), create);
IndexThread thread1 = new IndexThread(index, maxWait, 1);
thread1.start();
IndexThread thread2 = new IndexThread(index, maxWait, 2);
thread2.start();
while(thread1.isAlive() || thread2.isAlive()) {
Thread.sleep(100);
}
index.optimize();
int added = thread1.added + thread2.added;
int deleted = thread1.deleted + thread2.deleted;
assertEquals(added-deleted, index.docCount());
index.close();
try {
index.close();
fail();
} catch(IllegalStateException e) {
// expected exception
}
rmDir(indexDir);
}
private void rmDir(File dir) {
File[] files = dir.listFiles();
for (int i = 0; i < files.length; i++) {
files[i].delete();
}
dir.delete();
}
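// Subclass that uses the protected IndexModifier internals to expose docFreq for the test: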
private class PowerIndex extends IndexModifier {
public PowerIndex(Directory dir, Analyzer analyzer, boolean create) throws IOException {
super(dir, analyzer, create);
}
public int docFreq(Term term) throws IOException {
synchronized(directory) {
assureOpen();
createIndexReader();
return indexReader.docFreq(term);
}
}
}
}
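// Worker thread that randomly optimizes, adds documents, or deletes the most recently added document via the shared IndexModifier: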
class IndexThread extends Thread {
private final static int TEST_SECONDS = 3; // how many seconds to run each test
static int id = 0;
static Stack idStack = new Stack();
int added = 0;
int deleted = 0;
private int maxWait = 10;
private IndexModifier index;
private int threadNumber;
private Random random;
IndexThread(IndexModifier index, int maxWait, int threadNumber) {
this.index = index;
this.maxWait = maxWait;
this.threadNumber = threadNumber;
// TODO: test case is not reproducible despite pseudo-random numbers:
random = new Random(101+threadNumber); // constant seed for better reproducibility
}
public void run() {
final long endTime = System.currentTimeMillis() + 1000*TEST_SECONDS;
try {
while(System.currentTimeMillis() < endTime) {
int rand = random.nextInt(101);
if (rand < 5) {
index.optimize();
} else if (rand < 60) {
Document doc = getDocument();
index.addDocument(doc);
idStack.push(doc.get("id"));
added++;
} else {
// we just delete the last document added and remove it
// from the id stack so that it won't be removed twice:
String delId = null;
try {
delId = (String)idStack.pop();
} catch (EmptyStackException e) {
continue;
}
Term delTerm = new Term("id", Integer.valueOf(delId).toString());
int delCount = index.deleteDocuments(delTerm);
if (delCount != 1) {
throw new RuntimeException("Internal error: " + threadNumber + " deleted " + delCount +
" documents, term=" + delTerm);
}
deleted++;
}
if (maxWait > 0) {
rand = random.nextInt(maxWait);
//System.out.println("waiting " + rand + "ms");
try {
Thread.sleep(rand);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
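// Builds a document with a globally unique id (synchronized counter) plus two random "content" terms and the "all" marker field: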
private Document getDocument() {
Document doc = new Document();
synchronized (getClass()) {
doc.add(new Field("id", Integer.toString(id), Field.Store.YES,
Field.Index.NOT_ANALYZED));
id++;
}
// add random stuff:
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.ANALYZED));
return doc;
}
}

View File

@ -82,14 +82,14 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
writer.close();
IndexReader r = IndexReader.open(d);
IndexReader r = IndexReader.open(d, false);
r.deleteDocument(5);
r.flush(commitUserData);
r.close();
SegmentInfos sis = new SegmentInfos();
sis.read(d);
IndexReader r2 = IndexReader.open(d);
IndexReader r2 = IndexReader.open(d, false);
IndexCommit c = r.getIndexCommit();
assertEquals(c.getUserData(), commitUserData);
@ -127,7 +127,7 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
writer.close();
// set up reader:
IndexReader reader = IndexReader.open(d);
IndexReader reader = IndexReader.open(d, false);
assertTrue(reader.isCurrent());
// modify index by adding another document:
writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
@ -155,7 +155,7 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
writer.close();
// set up reader
IndexReader reader = IndexReader.open(d);
IndexReader reader = IndexReader.open(d, false);
Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
assertTrue(fieldNames.contains("keyword"));
assertTrue(fieldNames.contains("text"));
@ -182,7 +182,7 @@ public class TestIndexReader extends LuceneTestCase
writer.close();
// verify fields again
reader = IndexReader.open(d);
reader = IndexReader.open(d, false);
fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
assertEquals(13, fieldNames.size()); // the following fields
assertTrue(fieldNames.contains("keyword"));
@ -257,7 +257,7 @@ public class TestIndexReader extends LuceneTestCase
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(d);
IndexReader reader = IndexReader.open(d, false);
FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.getTermFreqVector(0, mapper);
Map map = mapper.getFieldToTerms();
@ -320,14 +320,14 @@ public class TestIndexReader extends LuceneTestCase
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("first reader", reader, searchTerm, 100);
reader.close();
// DELETE DOCUMENTS CONTAINING TERM: aaa
int deleted = 0;
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
@ -336,11 +336,11 @@ public class TestIndexReader extends LuceneTestCase
// open a 2nd reader to make sure first reader can
// commit its changes (.del) while second reader
// is open:
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, false);
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
reader.close();
@ -369,7 +369,7 @@ public class TestIndexReader extends LuceneTestCase
doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
doc = reader.document(reader.maxDoc() - 1);
Field[] fields = doc.getFields("bin1");
assertNotNull(fields);
@ -413,7 +413,7 @@ public class TestIndexReader extends LuceneTestCase
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
doc = reader.document(reader.maxDoc() - 1);
fields = doc.getFields("bin1");
assertNotNull(fields);
@ -456,7 +456,7 @@ public class TestIndexReader extends LuceneTestCase
}
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
// Close reader:
reader.close();
@ -501,7 +501,7 @@ public class TestIndexReader extends LuceneTestCase
}
// Create reader:
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
// Try to make changes
try {
@ -548,7 +548,7 @@ public class TestIndexReader extends LuceneTestCase
writer.close();
// now open reader & set norm for doc 0
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
reader.setNorm(0, "content", (float) 2.0);
// we should be holding the write lock now:
@ -560,7 +560,7 @@ public class TestIndexReader extends LuceneTestCase
assertTrue("not locked", !IndexReader.isLocked(dir));
// open a 2nd reader:
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, false);
// set norm again for doc 0
reader.setNorm(0, "content", (float) 3.0);
@ -595,12 +595,12 @@ public class TestIndexReader extends LuceneTestCase
// now open reader & set norm for doc 0 (writes to
// _0_1.s0)
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
reader.setNorm(0, "content", (float) 2.0);
reader.close();
// now open reader again & set norm for doc 0 (writes to _0_2.s0)
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
reader.setNorm(0, "content", (float) 2.0);
reader.close();
assertFalse("failed to remove first generation norms file on writing second generation",
@ -614,7 +614,7 @@ public class TestIndexReader extends LuceneTestCase
deleteReaderWriterConflict(false);
}
public void testOpenEmptyDirectory() throws IOException{
/* ??? public void testOpenEmptyDirectory() throws IOException{
String dirName = "test.empty";
File fileDirName = new File(dirName);
if (!fileDirName.exists()) {
@ -627,7 +627,7 @@ public class TestIndexReader extends LuceneTestCase
// GOOD
}
rmDir(fileDirName);
}
}*/
public void testDeleteReaderWriterConflictOptimized() throws IOException{
deleteReaderWriterConflict(true);
@ -651,7 +651,7 @@ public class TestIndexReader extends LuceneTestCase
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
@ -693,7 +693,7 @@ public class TestIndexReader extends LuceneTestCase
// Re-open index reader and try again. This time it should see
// the new data.
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
@ -708,7 +708,7 @@ public class TestIndexReader extends LuceneTestCase
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
@ -742,7 +742,7 @@ public class TestIndexReader extends LuceneTestCase
// Now open existing directory and test that reader closes all files
dir = getDirectory();
IndexReader reader1 = IndexReader.open(dir);
IndexReader reader1 = IndexReader.open(dir, false);
reader1.close();
dir.close();
@ -767,7 +767,7 @@ public class TestIndexReader extends LuceneTestCase
assertTrue(IndexReader.isLocked(dir)); // writer open, so dir is locked
writer.close();
assertTrue(IndexReader.indexExists(dir));
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertFalse(IndexReader.isLocked(dir)); // reader only, no lock
long version = IndexReader.lastModified(dir);
if (i == 1) {
@ -782,7 +782,7 @@ public class TestIndexReader extends LuceneTestCase
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertTrue("old lastModified is " + version + "; new lastModified is " + IndexReader.lastModified(dir), version <= IndexReader.lastModified(dir));
reader.close();
dir.close();
@ -802,7 +802,7 @@ public class TestIndexReader extends LuceneTestCase
assertTrue(IndexReader.isLocked(dir)); // writer open, so dir is locked
writer.close();
assertTrue(IndexReader.indexExists(dir));
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
assertFalse(IndexReader.isLocked(dir)); // reader only, no lock
long version = IndexReader.getCurrentVersion(dir);
reader.close();
@ -811,7 +811,7 @@ public class TestIndexReader extends LuceneTestCase
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertTrue("old version is " + version + "; new version is " + IndexReader.getCurrentVersion(dir), version < IndexReader.getCurrentVersion(dir));
reader.close();
dir.close();
@ -823,7 +823,7 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
writer.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
try {
reader.deleteDocument(0);
fail("expected lock");
@ -843,12 +843,12 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.deleteDocument(1);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
reader.close();
dir.close();
@ -860,11 +860,11 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.deleteDocument(1);
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
reader.undeleteAll();
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
reader.close();
@ -877,14 +877,14 @@ public class TestIndexReader extends LuceneTestCase
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(0);
reader.deleteDocument(1);
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
assertEquals(2, reader.numDocs()); // nothing has really been deleted thanks to undeleteAll()
reader.close();
dir.close();
@ -935,7 +935,7 @@ public class TestIndexReader extends LuceneTestCase
// the same files again.
dir.setPreventDoubleWrite(false);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
// For each disk size, first try to commit against
// dir that will hit random IOExceptions & disk
@ -1039,7 +1039,7 @@ public class TestIndexReader extends LuceneTestCase
// changed (transactional semantics):
IndexReader newReader = null;
try {
newReader = IndexReader.open(dir);
newReader = IndexReader.open(dir, false);
} catch (IOException e) {
e.printStackTrace();
fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
@ -1106,7 +1106,7 @@ public class TestIndexReader extends LuceneTestCase
addDoc(writer, "aaa");
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
// Try to delete an invalid docId, yet, within range
// of the final bits of the BitVector:
@ -1145,7 +1145,7 @@ public class TestIndexReader extends LuceneTestCase
addDoc(writer, "aaa");
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
try {
reader.deleteDocument(1);
fail("did not hit exception when deleting an invalid doc number");
@ -1157,7 +1157,7 @@ public class TestIndexReader extends LuceneTestCase
fail("write lock is still held after close");
}
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
try {
reader.setNorm(1, "content", (float) 2.0);
fail("did not hit exception when calling setNorm on an invalid doc number");
@ -1187,7 +1187,7 @@ public class TestIndexReader extends LuceneTestCase
"deletetest");
Directory dir = FSDirectory.open(dirFile);
try {
IndexReader.open(dir);
IndexReader.open(dir, false);
fail("expected FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
@ -1197,7 +1197,7 @@ public class TestIndexReader extends LuceneTestCase
// Make sure we still get a CorruptIndexException (not NPE):
try {
IndexReader.open(dir);
IndexReader.open(dir, false);
fail("expected FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
@ -1230,7 +1230,7 @@ public class TestIndexReader extends LuceneTestCase
// OPEN TWO READERS
// Both readers get segment info as exists at this time
IndexReader reader1 = IndexReader.open(dir);
IndexReader reader1 = IndexReader.open(dir, false);
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
@ -1238,7 +1238,7 @@ public class TestIndexReader extends LuceneTestCase
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, false);
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
@ -1279,7 +1279,7 @@ public class TestIndexReader extends LuceneTestCase
// RECREATE READER AND TRY AGAIN
reader1.close();
reader1 = IndexReader.open(dir);
reader1 = IndexReader.open(dir, false);
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
@ -1297,7 +1297,7 @@ public class TestIndexReader extends LuceneTestCase
reader1.close();
// Open another reader to confirm that everything is deleted
reader2 = IndexReader.open(dir);
reader2 = IndexReader.open(dir, false);
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm1));
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm2));
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm3));
@ -1449,7 +1449,7 @@ public class TestIndexReader extends LuceneTestCase
SegmentInfos sis = new SegmentInfos();
sis.read(d);
IndexReader r = IndexReader.open(d);
IndexReader r = IndexReader.open(d, false);
IndexCommit c = r.getIndexCommit();
assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());
@ -1537,42 +1537,6 @@ public class TestIndexReader extends LuceneTestCase
r3.close();
}
public void testFalseDirectoryAlreadyClosed() throws Throwable {
File indexDir = _TestUtil.getTempDir("lucenetestdiralreadyclosed");
try {
FSDirectory dir = FSDirectory.getDirectory(indexDir);
IndexWriter w = new IndexWriter(indexDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
w.setUseCompoundFile(false);
Document doc = new Document();
w.addDocument(doc);
w.close();
assertTrue(new File(indexDir, "_0.fnm").delete());
try {
IndexReader.open(indexDir);
fail("did not hit expected exception");
} catch (AlreadyClosedException ace) {
fail("should not have hit AlreadyClosedException");
} catch (FileNotFoundException ioe) {
// expected
}
// Make sure we really did close the dir inside IndexReader.open
dir.close();
try {
dir.fileExists("hi");
fail("did not hit expected exception");
} catch (AlreadyClosedException ace) {
// expected
}
} finally {
_TestUtil.rmDir(indexDir);
}
}
// LUCENE-1474
public void testIndexReader() throws Exception {
@ -1583,12 +1547,12 @@ public class TestIndexReader extends LuceneTestCase
writer.addDocument(createDocument("b"));
writer.addDocument(createDocument("c"));
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("id", "a"));
reader.flush();
reader.deleteDocuments(new Term("id", "b"));
reader.close();
IndexReader.open(dir).close();
IndexReader.open(dir,true).close();
}
// LUCENE-1647
@ -1601,14 +1565,14 @@ public class TestIndexReader extends LuceneTestCase
writer.addDocument(createDocument("b"));
writer.addDocument(createDocument("c"));
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("id", "a"));
reader.flush();
reader.deleteDocuments(new Term("id", "b"));
reader.undeleteAll();
reader.deleteDocuments(new Term("id", "b"));
reader.close();
IndexReader.open(dir).close();
IndexReader.open(dir,true).close();
dir.close();
}
@ -1624,7 +1588,7 @@ public class TestIndexReader extends LuceneTestCase
public void testNoDir() throws Throwable {
Directory dir = FSDirectory.open(_TestUtil.getTempDir("doesnotexist"));
try {
IndexReader.open(dir);
IndexReader.open(dir, true);
fail("did not hit expected exception");
} catch (NoSuchDirectoryException nsde) {
// expected
@ -1705,7 +1669,7 @@ public class TestIndexReader extends LuceneTestCase
writer.commit();
// Open reader1
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, false);
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
assertEquals(1, ints.length);
@ -1737,7 +1701,7 @@ public class TestIndexReader extends LuceneTestCase
writer.commit();
// Open reader1
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, false);
assertTrue(r instanceof DirectoryReader);
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
@ -1779,7 +1743,7 @@ public class TestIndexReader extends LuceneTestCase
writer.addDocument(doc);
writer.commit();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, false);
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
assertEquals(36, r1.getUniqueTermCount());
writer.addDocument(doc);

View File

@ -43,7 +43,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader reader = IndexReader.open(dir1);
IndexReader reader = IndexReader.open(dir1, false);
IndexReader readOnlyReader = reader.clone(true);
if (!isReadOnly(readOnlyReader)) {
fail("reader isn't read only");
@ -56,34 +56,6 @@ public class TestIndexReaderClone extends LuceneTestCase {
dir1.close();
}
// LUCENE-1453
public void testFSDirectoryClone() throws Exception {
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
throw new IOException("java.io.tmpdir undefined, cannot run test");
File indexDir2 = new File(tempDir, "FSDirIndexReaderClone");
Directory dir1 = FSDirectory.getDirectory(indexDir2);
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader reader = IndexReader.open(indexDir2);
IndexReader readOnlyReader = (IndexReader) reader.clone();
reader.close();
readOnlyReader.close();
// Make sure we didn't pick up too many incRef's along
// the way -- this close should be the final close:
dir1.close();
try {
dir1.listAll();
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException ace) {
// expected
}
}
// open non-readOnly reader1, clone to non-readOnly
// reader2, make sure we can change reader2
public void testCloneNoChangesStillReadOnly() throws Exception {
@ -255,7 +227,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, true);
IndexReader reader = IndexReader.open(dir1);
IndexReader reader = IndexReader.open(dir1, false);
IndexReader readOnlyReader = reader.clone(true);
if (!isReadOnly(readOnlyReader)) {
fail("reader isn't read only");
@ -277,8 +249,8 @@ public class TestIndexReaderClone extends LuceneTestCase {
TestIndexReaderReopen.createIndex(dir1, true);
final Directory dir2 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir2, true);
IndexReader r1 = IndexReader.open(dir1);
IndexReader r2 = IndexReader.open(dir2);
IndexReader r1 = IndexReader.open(dir1, false);
IndexReader r2 = IndexReader.open(dir2, false);
ParallelReader pr1 = new ParallelReader();
pr1.add(r1);
@ -327,8 +299,8 @@ public class TestIndexReaderClone extends LuceneTestCase {
TestIndexReaderReopen.createIndex(dir1, true);
final Directory dir2 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir2, true);
IndexReader r1 = IndexReader.open(dir1);
IndexReader r2 = IndexReader.open(dir2);
IndexReader r1 = IndexReader.open(dir1, false);
IndexReader r2 = IndexReader.open(dir2, false);
MultiReader multiReader = new MultiReader(new IndexReader[] { r1, r2 });
performDefaultTests(multiReader);
@ -373,7 +345,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader origReader = IndexReader.open(dir1);
IndexReader origReader = IndexReader.open(dir1, false);
SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(origReader);
// deletedDocsRef should be null because nothing has updated yet
assertNull(origSegmentReader.deletedDocsRef);
@ -435,14 +407,14 @@ public class TestIndexReaderClone extends LuceneTestCase {
public void testCloneWithDeletes() throws Throwable {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader origReader = IndexReader.open(dir1);
IndexReader origReader = IndexReader.open(dir1, false);
origReader.deleteDocument(1);
IndexReader clonedReader = (IndexReader) origReader.clone();
origReader.close();
clonedReader.close();
IndexReader r = IndexReader.open(dir1);
IndexReader r = IndexReader.open(dir1, false);
assertTrue(r.isDeleted(1));
r.close();
dir1.close();
@ -452,7 +424,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
public void testCloneWithSetNorm() throws Throwable {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader orig = IndexReader.open(dir1);
IndexReader orig = IndexReader.open(dir1, false);
orig.setNorm(1, "field1", 17.0f);
final byte encoded = Similarity.encodeNorm(17.0f);
assertEquals(encoded, orig.norms("field1")[1]);
@ -463,7 +435,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
orig.close();
clonedReader.close();
IndexReader r = IndexReader.open(dir1);
IndexReader r = IndexReader.open(dir1, false);
assertEquals(encoded, r.norms("field1")[1]);
r.close();
dir1.close();
@ -482,7 +454,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, true);
IndexReader reader = IndexReader.open(dir1);
IndexReader reader = IndexReader.open(dir1, false);
reader.deleteDocument(1); // acquire write lock
IndexReader[] subs = reader.getSequentialSubReaders();
assert subs.length > 1;
@ -501,7 +473,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
public void testLucene1516Bug() throws Exception {
final Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader r1 = IndexReader.open(dir1);
IndexReader r1 = IndexReader.open(dir1, false);
r1.incRef();
IndexReader r2 = r1.clone(false);
r1.deleteDocument(5);
@ -523,7 +495,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
doc.add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader r1 = IndexReader.open(dir);
IndexReader r1 = IndexReader.open(dir, false);
IndexReader r2 = r1.clone(false);
r1.close();
r2.close();

View File

@ -148,10 +148,10 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
// try cloning and reopening the norms
private void doTestNorms(Directory dir) throws IOException {
addDocs(dir, 12, true);
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
verifyIndex(ir);
modifyNormsForF1(ir);
IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir);//ir.clone();
IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir, false);//ir.clone();
verifyIndex(irc);
modifyNormsForF1(irc);
@ -183,7 +183,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
public void testNormsRefCounting() throws IOException {
Directory dir1 = new MockRAMDirectory();
TestIndexReaderReopen.createIndex(dir1, false);
IndexReader reader1 = IndexReader.open(dir1);
IndexReader reader1 = IndexReader.open(dir1, false);
IndexReader reader2C = (IndexReader) reader1.clone();
SegmentReader segmentReader2C = SegmentReader.getOnlySegmentReader(reader2C);
@ -243,7 +243,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
}
private void modifyNormsForF1(Directory dir) throws IOException {
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
modifyNormsForF1(ir);
}
@ -268,7 +268,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
}
private void verifyIndex(Directory dir) throws IOException {
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
verifyIndex(ir);
ir.close();
}

View File

@ -62,7 +62,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
protected IndexReader openReader() throws IOException {
return IndexReader.open(dir1);
return IndexReader.open(dir1, false);
}
});
@ -78,7 +78,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
protected IndexReader openReader() throws IOException {
return IndexReader.open(dir2);
return IndexReader.open(dir2, false);
}
});
@ -100,8 +100,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
return pr;
}
@ -123,11 +123,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir3));
pr.add(IndexReader.open(dir4));
pr.add(IndexReader.open(dir3, false));
pr.add(IndexReader.open(dir4, false));
// Does not implement reopen, so
// hits exception:
pr.add(new FilterIndexReader(IndexReader.open(dir3)));
pr.add(new FilterIndexReader(IndexReader.open(dir3, false)));
return pr;
}
@ -164,7 +164,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
private void doTestReopenWithCommit (Directory dir, boolean withReopen) throws IOException {
IndexWriter iwriter = new IndexWriter(dir, new KeywordAnalyzer(), true, MaxFieldLength.LIMITED);
iwriter.setMergeScheduler(new SerialMergeScheduler());
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
try {
int M = 3;
for (int i=0; i<4; i++) {
@ -194,7 +194,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
} else {
// recreate
reader.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
}
}
} finally {
@ -220,8 +220,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
return new MultiReader(new IndexReader[]
{IndexReader.open(dir1),
IndexReader.open(dir2)});
{IndexReader.open(dir1, false),
IndexReader.open(dir2, false)});
}
});
@ -244,11 +244,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
return new MultiReader(new IndexReader[]
{IndexReader.open(dir3),
IndexReader.open(dir4),
{IndexReader.open(dir3, false),
IndexReader.open(dir4, false),
// Does not implement reopen, so
// hits exception:
new FilterIndexReader(IndexReader.open(dir3))});
new FilterIndexReader(IndexReader.open(dir3, false))});
}
});
@ -280,12 +280,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected IndexReader openReader() throws IOException {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
MultiReader mr = new MultiReader(new IndexReader[] {
IndexReader.open(dir3), IndexReader.open(dir4)});
IndexReader.open(dir3, false), IndexReader.open(dir4, false)});
return new MultiReader(new IndexReader[] {
pr, mr, IndexReader.open(dir5)});
pr, mr, IndexReader.open(dir5, false)});
}
});
dir1.close();
@ -347,7 +347,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Directory dir1 = new MockRAMDirectory();
createIndex(dir1, true);
IndexReader reader0 = IndexReader.open(dir1);
IndexReader reader0 = IndexReader.open(dir1, false);
assertRefCountEquals(1, reader0);
assertTrue(reader0 instanceof DirectoryReader);
@ -357,7 +357,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
// delete first document, so that only one of the subReaders has to be re-opened
IndexReader modifier = IndexReader.open(dir1);
IndexReader modifier = IndexReader.open(dir1, false);
modifier.deleteDocument(0);
modifier.close();
@ -376,7 +376,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
// delete first document, so that only one of the subReaders has to be re-opened
modifier = IndexReader.open(dir1);
modifier = IndexReader.open(dir1, false);
modifier.deleteDocument(1);
modifier.close();
@ -454,10 +454,10 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Directory dir2 = new MockRAMDirectory();
createIndex(dir2, true);
IndexReader reader1 = IndexReader.open(dir1);
IndexReader reader1 = IndexReader.open(dir1, false);
assertRefCountEquals(1, reader1);
IndexReader initReader2 = IndexReader.open(dir2);
IndexReader initReader2 = IndexReader.open(dir2, false);
IndexReader multiReader1 = new MultiReader(new IndexReader[] {reader1, initReader2}, (mode == 0));
modifyIndex(0, dir2);
assertRefCountEquals(1 + mode, reader1);
@ -525,12 +525,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Directory dir2 = new MockRAMDirectory();
createIndex(dir2, true);
IndexReader reader1 = IndexReader.open(dir1);
IndexReader reader1 = IndexReader.open(dir1, false);
assertRefCountEquals(1, reader1);
ParallelReader parallelReader1 = new ParallelReader(mode == 0);
parallelReader1.add(reader1);
IndexReader initReader2 = IndexReader.open(dir2);
IndexReader initReader2 = IndexReader.open(dir2, false);
parallelReader1.add(initReader2);
modifyIndex(1, dir2);
assertRefCountEquals(1 + mode, reader1);
@ -597,26 +597,26 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Directory dir1 = new MockRAMDirectory();
createIndex(dir1, false);
IndexReader reader1 = IndexReader.open(dir1);
IndexReader reader1 = IndexReader.open(dir1, false);
SegmentReader segmentReader1 = SegmentReader.getOnlySegmentReader(reader1);
IndexReader modifier = IndexReader.open(dir1);
IndexReader modifier = IndexReader.open(dir1, false);
modifier.deleteDocument(0);
modifier.close();
IndexReader reader2 = reader1.reopen();
modifier = IndexReader.open(dir1);
modifier = IndexReader.open(dir1, false);
modifier.setNorm(1, "field1", 50);
modifier.setNorm(1, "field2", 50);
modifier.close();
IndexReader reader3 = reader2.reopen();
SegmentReader segmentReader3 = SegmentReader.getOnlySegmentReader(reader3);
modifier = IndexReader.open(dir1);
modifier = IndexReader.open(dir1, false);
modifier.deleteDocument(2);
modifier.close();
IndexReader reader4 = reader3.reopen();
modifier = IndexReader.open(dir1);
modifier = IndexReader.open(dir1, false);
modifier.deleteDocument(3);
modifier.close();
@ -697,11 +697,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
final TestReopen test = new TestReopen() {
protected void modifyIndex(int i) throws IOException {
if (i % 3 == 0) {
IndexReader modifier = IndexReader.open(dir);
IndexReader modifier = IndexReader.open(dir, false);
modifier.setNorm(i, "field1", 50);
modifier.close();
} else if (i % 3 == 1) {
IndexReader modifier = IndexReader.open(dir);
IndexReader modifier = IndexReader.open(dir, false);
modifier.deleteDocument(i % modifier.maxDoc());
modifier.close();
} else {
@ -712,12 +712,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
protected IndexReader openReader() throws IOException {
return IndexReader.open(dir);
return IndexReader.open(dir, false);
}
};
final List readers = Collections.synchronizedList(new ArrayList());
IndexReader firstReader = IndexReader.open(dir);
IndexReader firstReader = IndexReader.open(dir, false);
IndexReader reader = firstReader;
final Random rnd = newRandom();
@ -945,7 +945,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, false);
if (multiSegment) {
assertTrue(r.getSequentialSubReaders().length > 1);
} else {
@ -980,7 +980,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
break;
}
case 1: {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.setNorm(4, "field1", 123);
reader.setNorm(44, "field2", 222);
reader.setNorm(44, "field4", 22);
@ -1003,7 +1003,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
break;
}
case 4: {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.setNorm(5, "field1", 123);
reader.setNorm(55, "field2", 222);
reader.close();
@ -1081,71 +1081,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
indexDir = new File(tempDir, "IndexReaderReopen");
}
// LUCENE-1453
public void testFSDirectoryReopen() throws CorruptIndexException, IOException {
Directory dir1 = FSDirectory.getDirectory(indexDir, null);
createIndex(dir1, false);
dir1.close();
IndexReader ir = IndexReader.open(indexDir);
modifyIndex(3, ir.directory());
IndexReader newIr = ir.reopen();
modifyIndex(3, newIr.directory());
IndexReader newIr2 = newIr.reopen();
modifyIndex(3, newIr2.directory());
IndexReader newIr3 = newIr2.reopen();
ir.close();
newIr.close();
newIr2.close();
// shouldn't throw Directory AlreadyClosedException
modifyIndex(3, newIr3.directory());
newIr3.close();
}
// LUCENE-1453
public void testFSDirectoryReopen2() throws CorruptIndexException, IOException {
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
throw new IOException("java.io.tmpdir undefined, cannot run test");
File indexDir2 = new File(tempDir, "IndexReaderReopen2");
Directory dir1 = FSDirectory.getDirectory(indexDir2);
createIndex(dir1, false);
IndexReader lastReader = IndexReader.open(indexDir2);
Random r = newRandom();
for(int i=0;i<10;i++) {
int mod = r.nextInt(5);
modifyIndex(mod, lastReader.directory());
IndexReader reader = lastReader.reopen();
if (reader != lastReader) {
lastReader.close();
lastReader = reader;
}
}
lastReader.close();
// Make sure we didn't pick up too many incRef's along
// the way -- this close should be the final close:
dir1.close();
try {
dir1.listAll();
fail("did not hit AlreadyClosedException");
} catch (AlreadyClosedException ace) {
// expected
}
}
public void testCloseOrig() throws Throwable {
Directory dir = new MockRAMDirectory();
createIndex(dir, false);
IndexReader r1 = IndexReader.open(dir);
IndexReader r2 = IndexReader.open(dir);
IndexReader r1 = IndexReader.open(dir, false);
IndexReader r2 = IndexReader.open(dir, false);
r2.deleteDocument(0);
r2.close();
@ -1169,7 +1109,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
modifyIndex(0, dir); // Get delete bitVector on 1st segment
modifyIndex(5, dir); // Add a doc (2 segments)
IndexReader r1 = IndexReader.open(dir); // MSR
IndexReader r1 = IndexReader.open(dir, false); // MSR
modifyIndex(5, dir); // Add another doc (3 segments)
@ -1200,7 +1140,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
createIndex(dir, false);
// Get delete bitVector
modifyIndex(0, dir);
IndexReader r1 = IndexReader.open(dir);
IndexReader r1 = IndexReader.open(dir, false);
// Add doc:
modifyIndex(5, dir);
@ -1250,7 +1190,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
}
writer.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, false);
assertEquals(0, r.numDocs());
assertEquals(4, r.maxDoc());

View File

@ -104,7 +104,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
// delete 40 documents
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, false);
for (i = 0; i < 40; i++) {
reader.deleteDocument(i);
}
@ -115,7 +115,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals(100, writer.docCount());
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
@ -130,7 +130,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
@ -202,7 +202,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
// Make sure starting index seems to be working properly:
Term searchTerm = new Term("content", "aaa");
IndexReader reader = IndexReader.open(startDir);
IndexReader reader = IndexReader.open(startDir, true);
assertEquals("first docFreq", 57, reader.docFreq(searchTerm));
IndexSearcher searcher = new IndexSearcher(reader);
@ -315,7 +315,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
} else if (1 == method) {
IndexReader readers[] = new IndexReader[dirs.length];
for(int i=0;i<dirs.length;i++) {
readers[i] = IndexReader.open(dirs[i]);
readers[i] = IndexReader.open(dirs[i], true);
}
try {
writer.addIndexes(readers);
@ -387,7 +387,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
// failed, we see either all docs or no docs added
// (transactional semantics):
try {
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail(testName + ": exception when creating IndexReader: " + e);
@ -538,7 +538,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
// Make sure reader can open the index:
IndexReader.open(dir).close();
IndexReader.open(dir, true).close();
dir.close();
@ -592,7 +592,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// Make sure all terms < max size were indexed
assertEquals(2, reader.docFreq(new Term("content", "abc")));
@ -622,7 +622,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
reader.close();
@ -765,7 +765,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
@ -775,7 +775,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
@ -784,68 +784,6 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
}
// Same test as above, but use IndexWriter constructor
// that takes File:
public void testCreateWithReader2() throws IOException {
File indexDir = _TestUtil.getTempDir("lucenetestindexwriter");
try {
// add one document & close writer
IndexWriter writer = new IndexWriter(indexDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(indexDir);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(indexDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
assertEquals("should be zero documents", writer.docCount(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(indexDir);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
rmDir(indexDir);
}
}
// Same test as above, but use IndexWriter constructor
// that takes String:
public void testCreateWithReader3() throws IOException {
File dirName = _TestUtil.getTempDir("lucenetestindexwriter");
try {
// add one document & close writer
IndexWriter writer = new IndexWriter(dirName, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
writer.close();
// now open reader:
IndexReader reader = IndexReader.open(dirName);
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
writer = new IndexWriter(dirName, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
assertEquals("should be zero documents", writer.docCount(), 0);
addDoc(writer);
writer.close();
assertEquals("should be one document", reader.numDocs(), 1);
IndexReader reader2 = IndexReader.open(dirName);
assertEquals("should be one document", reader2.numDocs(), 1);
reader.close();
reader2.close();
} finally {
rmDir(dirName);
}
}
// Simulate a writer that crashed while writing segments
// file: make sure we can still open the index (ie,
// gracefully fallback to the previous segments file),
@ -886,7 +824,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexReader reader = null;
try {
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
} catch (Exception e) {
fail("reader failed to open on a crashed index");
}
@ -944,7 +882,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexReader reader = null;
try {
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
}
@ -1003,7 +941,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexReader reader = null;
try {
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
fail("reader did not hit IOException on opening a corrupt index");
} catch (Exception e) {
}
@ -1027,19 +965,19 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
searcher.close();
@ -1050,7 +988,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
assertFalse("reader should not be current now", reader.isCurrent());
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader did not see changes after writer was closed", 47, hits.length);
searcher.close();
@ -1075,7 +1013,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("first number of hits", 14, hits.length);
searcher.close();
@ -1088,7 +1026,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
// Delete all docs:
writer.deleteDocuments(searchTerm);
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
searcher.close();
@ -1098,7 +1036,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("saw changes after writer.abort", 14, hits.length);
searcher.close();
@ -1116,14 +1054,14 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
for(int j=0;j<17;j++) {
addDoc(writer);
}
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
searcher.close();
}
writer.close();
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("didn't see changes after close", 218, hits.length);
searcher.close();
@ -1159,7 +1097,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
IndexReader.open(dir).close();
IndexReader.open(dir, true).close();
long endDiskUsage = dir.getMaxUsedSizeInBytes();
@ -1194,7 +1132,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
// Open a reader before closing (committing) the writer:
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// Reader should see index as unoptimized at this
// point:
@ -1206,7 +1144,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
@ -1218,7 +1156,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
// Open a reader after aborting writer:
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
// Reader should still see index as unoptimized:
assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
@ -1231,7 +1169,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.flush();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.maxDoc());
assertEquals(0, reader.numDocs());
reader.close();
@ -1240,7 +1178,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.flush();
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(0, reader.maxDoc());
assertEquals(0, reader.numDocs());
reader.close();
@ -1262,7 +1200,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(100, reader.maxDoc());
assertEquals(100, reader.numDocs());
for(int j=0;j<100;j++) {
@ -1453,7 +1391,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
writer.close();
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
assertEquals(300, hits.length);
searcher.close();
@ -1479,7 +1417,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
Term searchTerm = new Term("field", "aaa");
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(10, hits.length);
searcher.close();
@ -1497,12 +1435,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, false);
hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals(27, hits.length);
searcher.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
reader.close();
dir.close();
@ -1525,7 +1463,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.maxDoc());
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
@ -1562,7 +1500,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
writer.close();
Term searchTerm = new Term("content", "aaa");
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
assertEquals("did not get right number of hits", 100, hits.length);
writer.close();
@ -1602,7 +1540,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(new Document());
writer.close();
_TestUtil.checkIndex(dir);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
}
@ -1625,7 +1563,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
if (0 == pass) {
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.isOptimized());
reader.close();
} else {
@ -1635,7 +1573,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertTrue(!reader.isOptimized());
reader.close();
@ -1832,7 +1770,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
final Term t = new Term("content", "aa");
assertEquals(reader.docFreq(t), 3);
@ -1907,7 +1845,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
assertTrue(hitError);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(198, reader.docFreq(new Term("content", "aa")));
reader.close();
}
@ -1968,7 +1906,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
int expected = 3+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
@ -1995,7 +1933,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
expected = 19+(1-i)*2;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
@ -2080,7 +2018,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.close();
}
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
int expected = (3+(1-i)*2)*NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
@ -2107,7 +2045,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
@ -2162,7 +2100,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocument(delID++);
reader.close();
@ -2251,7 +2189,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
t1.join();
// Make sure reader can read
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
reader.close();
// Reopen
@ -2376,7 +2314,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
// Quick test to make sure index is not corrupt:
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
int count = 0;
while(tdocs.next()) {
@ -2552,7 +2490,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
}
if (success) {
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
reader.document(j);
@ -2685,7 +2623,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
@ -2722,7 +2660,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
IndexReader reader = null;
try {
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
} catch (IOException e) {
e.printStackTrace(System.out);
fail("segmentInfos failed to retry fallback to correct segments_N file");
@ -2741,7 +2679,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.commit();
IndexReader reader2 = reader.reopen();
@ -2753,12 +2691,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
addDoc(writer);
assertEquals(23, reader2.numDocs());
reader2.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
@ -2805,7 +2743,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
failure.clearDoFail();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
dir.close();
@ -2843,7 +2781,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<reader.numDocs();i++) {
reader.document(i);
reader.getTermFreqVectors(i);
@ -2894,7 +2832,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertTrue(reader.getTermFreqVectors(0)==null);
assertTrue(reader.getTermFreqVectors(1)==null);
assertTrue(reader.getTermFreqVectors(2)!=null);
@ -2941,7 +2879,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
for(int i=0;i<10;i++) {
reader.getTermFreqVectors(i);
reader.document(i);
@ -2965,7 +2903,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
Term t = new Term("field", "x");
assertEquals(1, reader.docFreq(t));
reader.close();
@ -2996,7 +2934,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(document);
writer.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
assertEquals(10, ir.maxDoc());
assertEquals(10, ir.numDocs());
ir.deleteDocument(0);
@ -3012,7 +2950,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.expungeDeletes();
assertEquals(8, writer.numDocs());
writer.close();
ir = IndexReader.open(dir);
ir = IndexReader.open(dir, true);
assertEquals(8, ir.maxDoc());
assertEquals(8, ir.numDocs());
ir.close();
@ -3043,7 +2981,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(document);
writer.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
@ -3058,7 +2996,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals(49, writer.numDocs());
writer.expungeDeletes();
writer.close();
ir = IndexReader.open(dir);
ir = IndexReader.open(dir, true);
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
@ -3090,7 +3028,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.addDocument(document);
writer.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
assertEquals(98, ir.maxDoc());
assertEquals(98, ir.numDocs());
for(int i=0;i<98;i+=2)
@ -3105,7 +3043,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.setMergeFactor(3);
writer.expungeDeletes(false);
writer.close();
ir = IndexReader.open(dir);
ir = IndexReader.open(dir, true);
assertEquals(49, ir.maxDoc());
assertEquals(49, ir.numDocs());
ir.close();
@ -3258,7 +3196,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertTrue(w.wasCalled);
w.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
assertEquals(1, ir.maxDoc());
assertEquals(0, ir.numDocs());
ir.close();
@ -3354,7 +3292,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
Document doc2 = ir.document(0);
for(int i=0;i<count;i++) {
assertEquals("field " + i + " was not indexed correctly", 1, ir.docFreq(new Term("f"+i, utf8Data[2*i+1])));
@ -3563,7 +3501,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.commit();
IndexSearcher s = new IndexSearcher(dir);
IndexSearcher s = new IndexSearcher(dir, false);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("field", "a"));
pq.add(new Term("field", "b"));
@ -3596,12 +3534,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.commit();
@ -3618,18 +3556,18 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals(23, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(23, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(40, reader.numDocs());
reader.close();
writer.close();
@ -3649,12 +3587,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
for (int i = 0; i < 23; i++)
addDoc(writer);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
writer.prepareCommit();
IndexReader reader2 = IndexReader.open(dir);
IndexReader reader2 = IndexReader.open(dir, true);
assertEquals(0, reader2.numDocs());
writer.rollback();
@ -3672,18 +3610,18 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals(0, reader3.numDocs());
reader3.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.prepareCommit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
writer.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(17, reader.numDocs());
reader.close();
writer.close();
@ -3699,7 +3637,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer.commit();
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
dir.close();
@ -3733,7 +3671,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
readers = new IndexReader[NUM_COPY];
for(int i=0;i<NUM_COPY;i++)
readers[i] = IndexReader.open(dir);
readers[i] = IndexReader.open(dir, true);
}
void launchThreads(final int numIter) {
@ -3837,7 +3775,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*c.NUM_THREADS*c.NUM_INIT_DOCS, reader.numDocs());
reader.close();
@ -4019,7 +3957,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
@ -4055,7 +3993,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
byte[] cmp = new byte[20];
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
try {
for(int i=0;i<5;i++) {
Document doc = r.document(i);
@ -4083,7 +4021,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals(0, IndexReader.getCommitUserData(dir).size());
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
// commit(Map) never called for this index
assertEquals(0, r.getCommitUserData().size());
r.close();
@ -4099,7 +4037,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
@ -4177,7 +4115,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
// Token "" occurred once
@ -4209,7 +4147,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
@ -4231,7 +4169,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
@ -4255,7 +4193,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
@ -4281,7 +4219,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
@ -4303,7 +4241,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermVectorOffsetInfo[] termOffsets = ((TermPositionVector) r.getTermFreqVector(0, "field")).getOffsets(0);
assertEquals(2, termOffsets.length);
assertEquals(0, termOffsets[0].getStartOffset());
@ -4328,7 +4266,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
@ -4358,7 +4296,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
@ -4389,7 +4327,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.addDocument(doc);
w.close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
TermPositionVector tpv = ((TermPositionVector) r.getTermFreqVector(0, "field"));
TermVectorOffsetInfo[] termOffsets = tpv.getOffsets(0);
assertEquals(1, termOffsets.length);
@ -4448,12 +4386,12 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
writer2.addDocument(doc);
writer2.close();
IndexReader r1 = IndexReader.open(dir2);
IndexReader r1 = IndexReader.open(dir2, true);
IndexReader r2 = (IndexReader) r1.clone();
writer.addIndexes(new IndexReader[] {r1, r2});
writer.close();
IndexReader r3 = IndexReader.open(dir);
IndexReader r3 = IndexReader.open(dir, true);
assertEquals(5, r3.numDocs());
r3.close();
@ -4532,7 +4470,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
e.printStackTrace(System.out);
}
try {
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
//System.out.println("doc count=" + r.numDocs());
r.close();
} catch (Exception e) {
@ -4594,7 +4532,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase {
w.commit();
w.optimize(); // force segment merge.
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
doc = ir.document(0);
f = doc.getField("binary");
b = f.getBinaryValue();
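The same commit-wide pattern covers searching: the deprecated IndexSearcher(Directory) constructor gives way to IndexSearcher(Directory, boolean readOnly), so each test states whether its underlying reader may be modified. A short sketch of the new-style call follows; the scratch directory, class name, and field name are assumptions for illustration, not taken from the tests above.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ReadOnlySearcherSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
        IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("content", "aaa bbb", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // true opens the searcher over a read-only reader; the tests above use both values.
    IndexSearcher searcher = new IndexSearcher(dir, true);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
    System.out.println("hits=" + hits.length);
    searcher.close();
  }
}

Either way, the choice that was previously implicit in the deprecated constructor is now spelled out at the call site.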

View File

@ -102,7 +102,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -110,7 +110,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(0, reader.numDocs());
reader.close();
modifier.close();
@ -168,7 +168,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(1, reader.numDocs());
int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
@ -211,7 +211,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(5, reader.numDocs());
modifier.close();
}
@ -235,7 +235,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
modifier.commit();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -245,7 +245,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(5, reader.numDocs());
reader.close();
@ -255,7 +255,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
modifier.deleteDocuments(terms);
modifier.commit();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
@ -282,7 +282,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
modifier.commit();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -293,7 +293,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.deleteAll();
// Delete all shouldn't be on disk yet
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -305,7 +305,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.commit();
// Validate there are no docs left
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(2, reader.numDocs());
reader.close();
@ -332,7 +332,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
addDoc(modifier, ++id, value);
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -344,7 +344,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.close();
// Validate that the docs are still there
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -388,7 +388,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
modifier.close();
// Validate that the docs are still there
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
assertEquals(7, reader.numDocs());
reader.close();
@ -420,7 +420,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
}
private int getHitCount(Directory dir, Term term) throws IOException {
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
int hitCount = searcher.search(new TermQuery(term), null, 1000).totalHits;
searcher.close();
return hitCount;
@ -567,7 +567,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
// changed (transactional semantics):
IndexReader newReader = null;
try {
newReader = IndexReader.open(dir);
newReader = IndexReader.open(dir, true);
}
catch (IOException e) {
e.printStackTrace();

View File

@ -158,7 +158,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
}
// Confirm that when doc hits exception partway through tokenization, it's deleted:
IndexReader r2 = IndexReader.open(dir);
IndexReader r2 = IndexReader.open(dir, true);
final int count = r2.docFreq(new Term("content4", "aaa"));
final int count2 = r2.docFreq(new Term("content4", "ddd"));
assertEquals(count, count2);
@ -204,7 +204,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
}
// Confirm that when doc hits exception partway through tokenization, it's deleted:
IndexReader r2 = IndexReader.open(dir);
IndexReader r2 = IndexReader.open(dir, true);
final int count = r2.docFreq(new Term("content4", "aaa"));
final int count2 = r2.docFreq(new Term("content4", "ddd"));
assertEquals(count, count2);

View File

@ -178,7 +178,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, false);
reader.deleteDocuments(new Term("content", "aaa"));
reader.close();

View File

@ -71,7 +71,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
private boolean verifyIndex(Directory directory, int startAt) throws IOException
{
boolean fail = false;
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
int max = reader.maxDoc();
for (int i = 0; i < max; i++)

View File

@ -101,7 +101,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
r2.close();
writer.close();
IndexReader r3 = IndexReader.open(dir1);
IndexReader r3 = IndexReader.open(dir1, true);
assertEquals(0, count(new Term("id", id10), r3));
assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
r3.close();
@ -262,7 +262,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
_TestUtil.checkIndex(mainDir);
IndexReader reader = IndexReader.open(mainDir);
IndexReader reader = IndexReader.open(mainDir, true);
assertEquals(addDirThreads.count.intValue(), reader.numDocs());
//assertEquals(100 + numDirs * (3 * numIter / 4) * addDirThreads.NUM_THREADS
// * addDirThreads.NUM_INIT_DOCS, reader.numDocs());
@ -364,7 +364,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
readers = new IndexReader[numDirs];
for (int i = 0; i < numDirs; i++)
readers[i] = IndexReader.open(addDir);
readers[i] = IndexReader.open(addDir, false);
}
void joinThreads() {
@ -824,7 +824,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
w.expungeDeletes();
w.close();
r.close();
r = IndexReader.open(dir);
r = IndexReader.open(dir, true);
assertEquals(1, r.numDocs());
assertFalse(r.hasDeletions());
r.close();

View File

@ -88,7 +88,7 @@ public class TestLazyBug extends LuceneTestCase {
public void doTest(int[] docs) throws Exception {
Directory dir = makeIndex();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
for (int i = 0; i < docs.length; i++) {
Document d = reader.document(docs[i], SELECTOR);
d.get(MAGIC_FIELD);

View File

@ -125,7 +125,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
TermPositions tp = reader.termPositions();
tp.seek(new Term(this.field, "b"));
for (int i = 0; i < 10; i++) {

View File

@ -161,7 +161,7 @@ public class TestNorms extends LuceneTestCase {
}
private void modifyNormsForF1(Directory dir) throws IOException {
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
int n = ir.maxDoc();
for (int i = 0; i < n; i+=3) { // modify for every third doc
int k = (i*3) % modifiedNorms.size();
@ -179,7 +179,7 @@ public class TestNorms extends LuceneTestCase {
private void verifyIndex(Directory dir) throws IOException {
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, false);
for (int i = 0; i < NUM_FIELDS; i++) {
String field = "f"+i;
byte b[] = ir.norms(field);

View File

@ -267,7 +267,7 @@ public class TestOmitTf extends LuceneTestCase {
/*
* Verify the index
*/
Searcher searcher = new IndexSearcher(dir);
Searcher searcher = new IndexSearcher(dir, true);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("noTf", term);

View File

@ -68,8 +68,8 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = getDir1();
Directory dir2 = getDir2();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
Collection fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
assertEquals(4, fieldNames.size());
assertTrue(fieldNames.contains("f1"));
@ -82,8 +82,8 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = getDir1();
Directory dir2 = getDir2();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
Document doc11 = pr.document(0, new MapFieldSelector(new String[] {"f1"}));
Document doc24 = pr.document(1, new MapFieldSelector(Arrays.asList(new String[] {"f4"})));
@ -112,9 +112,9 @@ public class TestParallelReader extends LuceneTestCase {
w2.close();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir1, false));
try {
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir2, false));
fail("didn't get exptected exception: indexes don't have same number of documents");
} catch (IllegalArgumentException e) {
// expected exception
@ -125,11 +125,11 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = getDir1();
Directory dir2 = getDir2();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
assertTrue(pr.isCurrent());
IndexReader modifier = IndexReader.open(dir1);
IndexReader modifier = IndexReader.open(dir1, false);
modifier.setNorm(0, "f1", 100);
modifier.close();
@ -137,7 +137,7 @@ public class TestParallelReader extends LuceneTestCase {
// is not current anymore
assertFalse(pr.isCurrent());
modifier = IndexReader.open(dir2);
modifier = IndexReader.open(dir2, false);
modifier.setNorm(0, "f3", 100);
modifier.close();
@ -164,8 +164,8 @@ public class TestParallelReader extends LuceneTestCase {
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
assertFalse(pr.isOptimized());
pr.close();
@ -174,8 +174,8 @@ public class TestParallelReader extends LuceneTestCase {
modifier.close();
pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
// just one of the two indexes is optimized
assertFalse(pr.isOptimized());
pr.close();
@ -186,8 +186,8 @@ public class TestParallelReader extends LuceneTestCase {
modifier.close();
pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
// now both indexes are optimized
assertTrue(pr.isOptimized());
pr.close();
@ -198,8 +198,8 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = getDir1();
Directory dir2 = getDir2();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
int NUM_DOCS = 2;
TermDocs td = pr.termDocs(null);
for(int i=0;i<NUM_DOCS;i++) {
@ -247,7 +247,7 @@ public class TestParallelReader extends LuceneTestCase {
w.addDocument(d2);
w.close();
return new IndexSearcher(dir);
return new IndexSearcher(dir, false);
}
// Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
@ -255,8 +255,8 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = getDir1();
Directory dir2 = getDir2();
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(dir1));
pr.add(IndexReader.open(dir2));
pr.add(IndexReader.open(dir1, false));
pr.add(IndexReader.open(dir2, false));
return new IndexSearcher(pr);
}

View File

@ -61,8 +61,8 @@ public class TestParallelTermEnum extends LuceneTestCase {
iw2.close();
this.ir1 = IndexReader.open(rd1);
this.ir2 = IndexReader.open(rd2);
this.ir1 = IndexReader.open(rd1, true);
this.ir2 = IndexReader.open(rd2, true);
}
protected void tearDown() throws Exception {

View File

@ -220,7 +220,7 @@ public class TestPayloads extends LuceneTestCase {
* Verify the index
* first we test if all payloads are stored correctly
*/
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
byte[] verifyPayloadData = new byte[payloadDataLength];
offset = 0;
@ -320,7 +320,7 @@ public class TestPayloads extends LuceneTestCase {
// flush
writer.close();
reader = IndexReader.open(dir);
reader = IndexReader.open(dir, true);
tp = reader.termPositions(new Term(fieldName, singleTerm));
tp.next();
tp.nextPosition();
@ -492,7 +492,7 @@ public class TestPayloads extends LuceneTestCase {
ingesters[i].join();
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
TermEnum terms = reader.terms();
while (terms.next()) {
TermPositions tp = reader.termPositions(terms.term());

View File

@ -81,7 +81,7 @@ public class TestSegmentTermEnum extends LuceneTestCase
private void verifyDocFreq()
throws IOException
{
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
TermEnum termEnum = null;
// create enumeration of all terms

View File

@ -106,7 +106,7 @@ public class TestStressIndexing extends LuceneTestCase {
public void doWork() throws Throwable {
for (int i=0; i<100; i++)
(new IndexSearcher(directory)).close();
(new IndexSearcher(directory, true)).close();
count += 100;
}
}

View File

@ -243,14 +243,14 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
public static void verifyEquals(IndexReader r1, Directory dir2, String idField) throws Throwable {
IndexReader r2 = IndexReader.open(dir2);
IndexReader r2 = IndexReader.open(dir2, true);
verifyEquals(r1, r2, idField);
r2.close();
}
public static void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable {
IndexReader r1 = IndexReader.open(dir1);
IndexReader r2 = IndexReader.open(dir2);
IndexReader r1 = IndexReader.open(dir1, true);
IndexReader r2 = IndexReader.open(dir2, true);
verifyEquals(r1, r2, idField);
r1.close();
r2.close();

View File

@ -342,7 +342,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
// test setDocumentNumber()
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
DocNumAwareMapper docNumAwareMapper = new DocNumAwareMapper();
assertEquals(-1, docNumAwareMapper.getDocumentNumber());

View File

@ -92,7 +92,7 @@ public class TestTermdocPerf extends LuceneTestCase {
long end = System.currentTimeMillis();
System.out.println("milliseconds for creation of " + ndocs + " docs = " + (end-start));
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
TermEnum tenum = reader.terms(new Term("foo","val"));
TermDocs tdocs = reader.termDocs();

View File

@ -124,7 +124,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
writer.setMaxBufferedDocs(2);
}
IndexReader reader = IndexReader.open(directory);
IndexReader reader = IndexReader.open(directory, true);
assertTrue(reader.isOptimized());
assertEquals(expectedDocCount, reader.numDocs());
reader.close();

View File

@ -88,7 +88,7 @@ public class TestTransactionRollback extends LuceneTestCase {
}
private void checkExpecteds(BitSet expecteds) throws Exception {
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
//Perhaps not the most efficient approach but meets our needs here.
for (int i = 0; i < r.maxDoc(); i++) {
@ -199,7 +199,7 @@ public class TestTransactionRollback extends LuceneTestCase {
new IndexWriter(dir,new WhitespaceAnalyzer(),
new DeleteLastCommitPolicy(),
MaxFieldLength.UNLIMITED).close();
IndexReader r = IndexReader.open(dir);
IndexReader r = IndexReader.open(dir, true);
assertEquals(100, r.numDocs());
r.close();
}

View File

@ -161,8 +161,8 @@ public class TestTransactions extends LuceneTestCase
public void doWork() throws Throwable {
IndexReader r1, r2;
synchronized(lock) {
r1 = IndexReader.open(dir1);
r2 = IndexReader.open(dir2);
r1 = IndexReader.open(dir1, true);
r2 = IndexReader.open(dir2, true);
}
if (r1.numDocs() != r2.numDocs())
throw new RuntimeException("doc counts differ: r1=" + r1.numDocs() + " r2=" + r2.numDocs());

View File

@ -294,7 +294,7 @@ public class TestMultiFieldQueryParser extends BaseTokenStreamTestCase {
new MultiFieldQueryParser(new String[] {"body"}, analyzer);
mfqp.setDefaultOperator(QueryParser.Operator.AND);
Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir);
IndexSearcher is = new IndexSearcher(ramDir, true);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
assertEquals(1, hits.length);
is.close();

View File

@ -41,7 +41,6 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@ -470,7 +469,7 @@ public class TestQueryParser extends LocalizedTestCase {
Field.Store.YES, Field.Index.UN_TOKENIZED));
iw.addDocument(doc);
iw.close();
IndexSearcher is = new IndexSearcher(ramDir);
IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParser qp = new QueryParser("content", new WhitespaceAnalyzer());
@ -513,27 +512,6 @@ public class TestQueryParser extends LocalizedTestCase {
}
}
/** for testing legacy DateField support */
private String getLegacyDate(String s) throws Exception {
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
return DateField.dateToString(df.parse(s));
}
/** for testing DateTools support */
private String getDate(String s, DateTools.Resolution resolution) throws Exception {
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
return getDate(df.parse(s), resolution);
}
/** for testing DateTools support */
private String getDate(Date d, DateTools.Resolution resolution) throws Exception {
if (resolution == null) {
return DateField.dateToString(d);
} else {
return DateTools.dateToString(d, resolution);
}
}
private String getLocalizedDate(int year, int month, int day, boolean extendLastDate) {
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
Calendar calendar = new GregorianCalendar();
@ -547,68 +525,6 @@ public class TestQueryParser extends LocalizedTestCase {
return df.format(calendar.getTime());
}
/** for testing legacy DateField support */
public void testLegacyDateRange() throws Exception {
String startDate = getLocalizedDate(2002, 1, 1, false);
String endDate = getLocalizedDate(2002, 1, 4, false);
Calendar endDateExpected = new GregorianCalendar();
endDateExpected.set(2002, 1, 4, 23, 59, 59);
endDateExpected.set(Calendar.MILLISECOND, 999);
assertQueryEquals("[ " + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", null,
"[" + getLegacyDate(startDate) + " TO " + DateField.dateToString(endDateExpected.getTime()) + "]");
assertQueryEquals("{ " + escapeDateString(startDate) + " " + escapeDateString(endDate) + " }", null,
"{" + getLegacyDate(startDate) + " TO " + getLegacyDate(endDate) + "}");
}
public void testDateRange() throws Exception {
String startDate = getLocalizedDate(2002, 1, 1, false);
String endDate = getLocalizedDate(2002, 1, 4, false);
Calendar endDateExpected = new GregorianCalendar();
endDateExpected.set(2002, 1, 4, 23, 59, 59);
endDateExpected.set(Calendar.MILLISECOND, 999);
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
endDateExpected.getTime(), null);
// set a field specific date resolution
qp.setDateResolution(monthField, DateTools.Resolution.MONTH);
// DateField should still be used for defaultField
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
endDateExpected.getTime(), null);
// set default date resolution to MILLISECOND
qp.setDateResolution(DateTools.Resolution.MILLISECOND);
// set second field specific date resolution
qp.setDateResolution(hourField, DateTools.Resolution.HOUR);
// for this field no field specific date resolution has been set,
// so verify if the default resolution is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
endDateExpected.getTime(), DateTools.Resolution.MILLISECOND);
// verify if field specific date resolutions are used for these two fields
assertDateRangeQueryEquals(qp, monthField, startDate, endDate,
endDateExpected.getTime(), DateTools.Resolution.MONTH);
assertDateRangeQueryEquals(qp, hourField, startDate, endDate,
endDateExpected.getTime(), DateTools.Resolution.HOUR);
}
public void assertDateRangeQueryEquals(QueryParser qp, String field, String startDate, String endDate,
Date endDateInclusive, DateTools.Resolution resolution) throws Exception {
assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]",
"[" + getDate(startDate, resolution) + " TO " + getDate(endDateInclusive, resolution) + "]");
assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}",
"{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}");
}
public void testEscaped() throws Exception {
Analyzer a = new WhitespaceAnalyzer();
@ -870,22 +786,6 @@ public class TestQueryParser extends LocalizedTestCase {
assertEquals(query1, query2);
}
public void testLocalDateFormat() throws IOException, ParseException {
RAMDirectory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
iw.close();
IndexSearcher is = new IndexSearcher(ramDir);
assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
is.close();
}
public void testStarParsing() throws Exception {
final int[] type = new int[1];
QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer()) {
@ -999,16 +899,6 @@ public class TestQueryParser extends LocalizedTestCase {
assertEquals(expected, hits.length);
}
private static void addDateDoc(String content, int year, int month,
int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance(Locale.ENGLISH);
cal.set(year, month-1, day, hour, minute, second);
d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}
public void tearDown() throws Exception {
super.tearDown();
BooleanQuery.setMaxClauseCount(originalMaxClauses);

View File

@ -145,19 +145,19 @@ public class QueryUtils {
// we can't put deleted docs before the nested reader, because
// it will throw off the docIds
IndexReader[] readers = new IndexReader[] {
edge < 0 ? r : IndexReader.open(makeEmptyIndex(0)),
IndexReader.open(makeEmptyIndex(0)),
edge < 0 ? r : IndexReader.open(makeEmptyIndex(0), true),
IndexReader.open(makeEmptyIndex(0), true),
new MultiReader(new IndexReader[] {
IndexReader.open(makeEmptyIndex(edge < 0 ? 4 : 0)),
IndexReader.open(makeEmptyIndex(0)),
0 == edge ? r : IndexReader.open(makeEmptyIndex(0))
IndexReader.open(makeEmptyIndex(edge < 0 ? 4 : 0), true),
IndexReader.open(makeEmptyIndex(0), true),
0 == edge ? r : IndexReader.open(makeEmptyIndex(0), true)
}),
IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 7)),
IndexReader.open(makeEmptyIndex(0)),
IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 7), true),
IndexReader.open(makeEmptyIndex(0), true),
new MultiReader(new IndexReader[] {
IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 5)),
IndexReader.open(makeEmptyIndex(0)),
0 < edge ? r : IndexReader.open(makeEmptyIndex(0))
IndexReader.open(makeEmptyIndex(0 < edge ? 0 : 5), true),
IndexReader.open(makeEmptyIndex(0), true),
0 < edge ? r : IndexReader.open(makeEmptyIndex(0), true)
})
};
IndexSearcher out = new IndexSearcher(new MultiReader(readers));
@ -179,18 +179,18 @@ public class QueryUtils {
// we can't put deleted docs before the nested reader, because
// it will throw off the docIds
Searcher[] searchers = new Searcher[] {
edge < 0 ? s : new IndexSearcher(makeEmptyIndex(0)),
edge < 0 ? s : new IndexSearcher(makeEmptyIndex(0), true),
new MultiSearcher(new Searcher[] {
new IndexSearcher(makeEmptyIndex(edge < 0 ? 65 : 0)),
new IndexSearcher(makeEmptyIndex(0)),
0 == edge ? s : new IndexSearcher(makeEmptyIndex(0))
new IndexSearcher(makeEmptyIndex(edge < 0 ? 65 : 0), true),
new IndexSearcher(makeEmptyIndex(0), true),
0 == edge ? s : new IndexSearcher(makeEmptyIndex(0), true)
}),
new IndexSearcher(makeEmptyIndex(0 < edge ? 0 : 3)),
new IndexSearcher(makeEmptyIndex(0)),
new IndexSearcher(makeEmptyIndex(0 < edge ? 0 : 3), true),
new IndexSearcher(makeEmptyIndex(0), true),
new MultiSearcher(new Searcher[] {
new IndexSearcher(makeEmptyIndex(0 < edge ? 0 : 5)),
new IndexSearcher(makeEmptyIndex(0)),
0 < edge ? s : new IndexSearcher(makeEmptyIndex(0))
new IndexSearcher(makeEmptyIndex(0 < edge ? 0 : 5), true),
new IndexSearcher(makeEmptyIndex(0), true),
0 < edge ? s : new IndexSearcher(makeEmptyIndex(0), true)
})
};
MultiSearcher out = new MultiSearcher(searchers);
@ -218,7 +218,7 @@ public class QueryUtils {
Assert.assertEquals("writer has non-deleted docs",
0, w.numDocs());
w.close();
IndexReader r = IndexReader.open(d);
IndexReader r = IndexReader.open(d, true);
Assert.assertEquals("reader has wrong number of deleted docs",
numDeletedDocs, r.numDeletedDocs());
r.close();

View File

@ -1,149 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.util.StringHelper;
import java.io.IOException;
import java.io.Serializable;
/**
* An example Comparable for use with the custom sort tests.
* It implements a comparable for "id" sort of values which
* consist of an alphanumeric part and a numeric part, such as:
* <p/>
* <P>ABC-123, A-1, A-7, A-100, B-99999
* <p/>
* <p>Such values cannot be sorted as strings, since A-100 needs
* to come after A-7.
* <p/>
* <p>It could be argued that the "ids" should be rewritten as
* A-0001, A-0100, etc. so they will sort as strings. That is
* a valid alternate way to solve it - but
* this is only supposed to be a simple test case.
* <p/>
* <p>Created: Apr 21, 2004 5:34:47 PM
*
*
* @version $Id$
* @since 1.4
*/
public class SampleComparable
implements Comparable, Serializable {
String string_part;
Integer int_part;
public SampleComparable (String s) {
int i = s.indexOf ("-");
string_part = s.substring (0, i);
int_part = Integer.valueOf(s.substring (i + 1));
}
public int compareTo (Object o) {
SampleComparable otherid = (SampleComparable) o;
int i = string_part.compareTo (otherid.string_part);
if (i == 0) return int_part.compareTo (otherid.int_part);
return i;
}
public static SortComparatorSource getComparatorSource () {
return new SortComparatorSource () {
public ScoreDocComparator newComparator (final IndexReader reader, String fieldname)
throws IOException {
final String field = StringHelper.intern(fieldname);
final TermEnum enumerator = reader.terms (new Term (fieldname, ""));
try {
return new ScoreDocComparator () {
protected Comparable[] cachedValues = fillCache (reader, enumerator, field);
public int compare (ScoreDoc i, ScoreDoc j) {
return cachedValues[i.doc].compareTo (cachedValues[j.doc]);
}
public Comparable sortValue (ScoreDoc i) {
return cachedValues[i.doc];
}
public int sortType () {
return SortField.CUSTOM;
}
};
} finally {
enumerator.close ();
}
}
/**
* Returns an array of objects which represent the natural order
* of the term values in the given field.
*
* @param reader Terms are in this index.
* @param enumerator Use this to get the term values and TermDocs.
* @param fieldname Comparables should be for this field.
* @return Array of objects representing natural order of terms in field.
* @throws IOException If an error occurs reading the index.
*/
protected Comparable[] fillCache (IndexReader reader, TermEnum enumerator, String fieldname)
throws IOException {
final String field = StringHelper.intern(fieldname);
Comparable[] retArray = new Comparable[reader.maxDoc ()];
if (retArray.length > 0) {
TermDocs termDocs = reader.termDocs ();
try {
if (enumerator.term () == null) {
throw new RuntimeException ("no terms in field " + field);
}
do {
Term term = enumerator.term ();
if (term.field () != field) break;
Comparable termval = getComparable (term.text ());
termDocs.seek (enumerator);
while (termDocs.next ()) {
retArray[termDocs.doc ()] = termval;
}
} while (enumerator.next ());
} finally {
termDocs.close ();
}
}
return retArray;
}
Comparable getComparable (String termtext) {
return new SampleComparable (termtext);
}
};
}
private static final class InnerSortComparator extends SortComparator {
protected Comparable getComparable (String termtext) {
return new SampleComparable (termtext);
}
public int hashCode() { return this.getClass().getName().hashCode(); }
public boolean equals(Object that) { return this.getClass().equals(that.getClass()); }
};
public static SortComparator getComparator() {
return new InnerSortComparator();
}
}

View File

@ -48,7 +48,7 @@ public class TestBoolean2 extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
}
private String[] docFields = {

View File

@ -74,7 +74,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
writer.optimize();
writer.close();
r = IndexReader.open(index);
r = IndexReader.open(index, true);
s = new IndexSearcher(r);
//System.out.println("Set up " + getName());

View File

@ -155,6 +155,6 @@ public class TestBooleanOr extends LuceneTestCase {
writer.close();
//
searcher = new IndexSearcher(rd);
searcher = new IndexSearcher(rd, true);
}
}

View File

@ -89,7 +89,7 @@ public class TestBooleanPrefixQuery extends LuceneTestCase {
}
writer.close();
reader = IndexReader.open(directory);
reader = IndexReader.open(directory, true);
PrefixQuery query = new PrefixQuery(new Term("category", "foo"));
rw1 = query.rewrite(reader);

View File

@ -64,7 +64,7 @@ public class TestBooleanScorer extends LuceneTestCase
query.add(booleanQuery1, BooleanClause.Occur.MUST);
query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);
IndexSearcher indexSearcher = new IndexSearcher(directory);
IndexSearcher indexSearcher = new IndexSearcher(directory, true);
ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
assertEquals("Number of matched documents", 2, hits.length);

View File

@ -36,7 +36,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
MockFilter filter = new MockFilter();
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
@ -76,7 +76,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
// not cacheable:
assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);

View File

@ -62,7 +62,7 @@ public class TestDateFilter
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// filter that should preserve matches
//DateFilter df1 = DateFilter.Before("datefield", now);
@ -123,7 +123,7 @@ public class TestDateFilter
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// filter that should preserve matches
//DateFilter df1 = DateFilter.After("datefield", now);

View File

@ -70,7 +70,7 @@ public class TestDateSort extends LuceneTestCase {
}
public void testReverseDateSort() throws Exception {
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
// Create a Sort object. reverse is set to true.
// problem occurs only with SortField.AUTO:

View File

@ -121,7 +121,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
writer.close();
r = IndexReader.open(index);
r = IndexReader.open(index, true);
s = new IndexSearcher(r);
s.setSimilarity(sim);
}

View File

@ -66,7 +66,7 @@ public class TestDocBoost extends LuceneTestCase {
final float[] scores = new float[4];
new IndexSearcher(store).search
new IndexSearcher(store, true).search
(new TermQuery(new Term("field", "word")),
new Collector() {
private int base = 0;

View File

@ -70,7 +70,7 @@ public class TestExplanations extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
}
protected String[] docFields = {

View File

@ -57,7 +57,7 @@ public class TestFieldCache extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
reader = IndexReader.open(directory);
reader = IndexReader.open(directory, true);
}
public void testInfoStream() throws Exception {

View File

@ -49,7 +49,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterId() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -135,7 +135,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterRand() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndex.minR);
@ -198,7 +198,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterShorts() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@ -289,7 +289,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterInts() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@ -381,7 +381,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterLongs() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@ -475,7 +475,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterFloats() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@ -503,7 +503,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterDoubles() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@ -545,7 +545,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
writer.deleteDocuments(new Term("id","0"));
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
IndexSearcher search = new IndexSearcher(reader);
assertTrue(reader.hasDeletions());

View File

@ -48,7 +48,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
}
w.close();
IndexReader reader = IndexReader.open(rd);
IndexReader reader = IndexReader.open(rd, true);
IndexSearcher searcher = new IndexSearcher(reader);
int numDocs = reader.numDocs();
ScoreDoc[] results;

View File

@ -61,7 +61,7 @@ public class TestFilteredSearch extends LuceneTestCase {
booleanQuery.add(new TermQuery(new Term(FIELD, "36")), BooleanClause.Occur.SHOULD);
IndexSearcher indexSearcher = new IndexSearcher(directory);
IndexSearcher indexSearcher = new IndexSearcher(directory, true);
ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
assertEquals("Number of matched documents", 1, hits.length);

View File

@ -45,7 +45,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("ddddd", writer);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@ -168,7 +168,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("segment", writer);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
FuzzyQuery query;
// not similar enough:
@ -257,7 +257,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("segment", writer);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
Query query;
// term not over 10 chars, so optimization shortcuts

View File

@ -48,7 +48,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
addDoc("three four", iw, 300f);
iw.close();
IndexReader ir = IndexReader.open(dir);
IndexReader ir = IndexReader.open(dir, true);
IndexSearcher is = new IndexSearcher(ir);
ScoreDoc[] hits;

View File

@ -59,7 +59,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// search for "blueberry pi*":
MultiPhraseQuery query1 = new MultiPhraseQuery();
@ -69,7 +69,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
query2.add(new Term("body", "strawberry"));
LinkedList termsWithPrefix = new LinkedList();
IndexReader ir = IndexReader.open(indexStore);
IndexReader ir = IndexReader.open(indexStore, true);
// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
@ -149,7 +149,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// This query will be equivalent to +body:pie +body:"blue*"
BooleanQuery q = new BooleanQuery();
q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
@ -175,7 +175,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
add("a note", "note", writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
// This query will be equivalent to +type:note +body:"a t*"
BooleanQuery q = new BooleanQuery();

View File

@ -109,8 +109,8 @@ public class TestMultiSearcher extends LuceneTestCase
// building the searchables
Searcher[] searchers = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers[0] = new IndexSearcher(indexStoreB);
searchers[1] = new IndexSearcher(indexStoreA);
searchers[0] = new IndexSearcher(indexStoreB, true);
searchers[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher = getMultiSearcherInstance(searchers);
// performing the search
@ -138,8 +138,8 @@ public class TestMultiSearcher extends LuceneTestCase
// building the searchables
Searcher[] searchers2 = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers2[0] = new IndexSearcher(indexStoreB);
searchers2[1] = new IndexSearcher(indexStoreA);
searchers2[0] = new IndexSearcher(indexStoreB, true);
searchers2[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
// performing the same search
@ -171,7 +171,7 @@ public class TestMultiSearcher extends LuceneTestCase
// deleting the document just added; this will cause a different exception to take place
Term term = new Term("id", "doc1");
IndexReader readerB = IndexReader.open(indexStoreB);
IndexReader readerB = IndexReader.open(indexStoreB, false);
readerB.deleteDocuments(term);
readerB.close();
@ -183,8 +183,8 @@ public class TestMultiSearcher extends LuceneTestCase
// building the searchables
Searcher[] searchers3 = new Searcher[2];
searchers3[0] = new IndexSearcher(indexStoreB);
searchers3[1] = new IndexSearcher(indexStoreA);
searchers3[0] = new IndexSearcher(indexStoreB, true);
searchers3[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
// performing the same search
@ -241,8 +241,8 @@ public class TestMultiSearcher extends LuceneTestCase
initIndex(ramDirectory1, 10, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(ramDirectory2, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1 = new IndexSearcher(ramDirectory1);
indexSearcher2 = new IndexSearcher(ramDirectory2);
indexSearcher1 = new IndexSearcher(ramDirectory1, true);
indexSearcher2 = new IndexSearcher(ramDirectory2, true);
MultiSearcher searcher = getMultiSearcherInstance(new Searcher[]{indexSearcher1, indexSearcher2});
assertTrue("searcher is null and it shouldn't be", searcher != null);
@ -297,7 +297,7 @@ public class TestMultiSearcher extends LuceneTestCase
initIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1=new IndexSearcher(ramDirectory1);
indexSearcher1=new IndexSearcher(ramDirectory1, true);
indexSearcher1.setDefaultFieldSortScoring(true, true);
hits=indexSearcher1.search(query, null, 1000).scoreDocs;
@ -325,9 +325,9 @@ public class TestMultiSearcher extends LuceneTestCase
initIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1=new IndexSearcher(ramDirectory1);
indexSearcher1=new IndexSearcher(ramDirectory1, true);
indexSearcher1.setDefaultFieldSortScoring(true, true);
indexSearcher2=new IndexSearcher(ramDirectory2);
indexSearcher2=new IndexSearcher(ramDirectory2, true);
indexSearcher2.setDefaultFieldSortScoring(true, true);
Searcher searcher=getMultiSearcherInstance(new Searcher[] { indexSearcher1, indexSearcher2 });
@ -363,7 +363,7 @@ public class TestMultiSearcher extends LuceneTestCase
public void testCustomSimilarity () throws IOException {
RAMDirectory dir = new RAMDirectory();
initIndex(dir, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
IndexSearcher srchr = new IndexSearcher(dir);
IndexSearcher srchr = new IndexSearcher(dir, true);
MultiSearcher msrchr = getMultiSearcherInstance(new Searcher[]{srchr});
Similarity customSimilarity = new DefaultSimilarity() {

View File

@ -125,8 +125,8 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
iw2.close();
Searchable[] s = new Searchable[2];
s[0] = new IndexSearcher(d1);
s[1] = new IndexSearcher(d2);
s[0] = new IndexSearcher(d1, true);
s[1] = new IndexSearcher(d2, true);
multiSearcher = new MultiSearcher(s);
// create IndexSearcher which contains all documents
@ -136,7 +136,7 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
addCollection1(iw);
addCollection2(iw);
iw.close();
singleSearcher = new IndexSearcher(d);
singleSearcher = new IndexSearcher(d, true);
}
private void addCollection1(IndexWriter iw) throws IOException {

View File

@ -146,7 +146,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
IndexReader reader = IndexReader.open(small);
IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
@ -175,7 +175,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
IndexReader reader = IndexReader.open(small);
IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
@ -243,7 +243,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
IndexReader reader = IndexReader.open(small);
IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
@ -274,7 +274,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -401,7 +401,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -484,7 +484,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndex.minR);
@ -547,7 +547,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = IndexReader.open(unsignedIndex.index);
IndexReader reader = IndexReader.open(unsignedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(unsignedIndex.minR);
@ -624,7 +624,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(farsiIndex);
IndexReader reader = IndexReader.open(farsiIndex, true);
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
@ -668,7 +668,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(danishIndex);
IndexReader reader = IndexReader.open(danishIndex, true);
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));

View File

@ -62,7 +62,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase {
IndexReader reader = null;
try {
reader = IndexReader.open(directory);
reader = IndexReader.open(directory, true);
for(int i = 1; i <= numThreads; i++)
testTermPositionVectors(reader, i);

View File

@ -47,7 +47,7 @@ public class TestNot extends LuceneTestCase {
writer.optimize();
writer.close();
Searcher searcher = new IndexSearcher(store);
Searcher searcher = new IndexSearcher(store, true);
QueryParser parser = new QueryParser("field", new SimpleAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);

View File

@ -70,7 +70,7 @@ public class TestPhrasePrefixQuery
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
//PhrasePrefixQuery query1 = new PhrasePrefixQuery();
MultiPhraseQuery query1 = new MultiPhraseQuery();
@ -80,7 +80,7 @@ public class TestPhrasePrefixQuery
query2.add(new Term("body", "strawberry"));
LinkedList termsWithPrefix = new LinkedList();
IndexReader ir = IndexReader.open(indexStore);
IndexReader ir = IndexReader.open(indexStore, true);
// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";

View File

@ -77,7 +77,7 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.optimize();
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
query = new PhraseQuery();
}
@ -209,7 +209,7 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
@ -249,7 +249,7 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
@ -287,7 +287,7 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.optimize();
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
@ -338,7 +338,7 @@ public class TestPhraseQuery extends LuceneTestCase {
writer.optimize();
writer.close();
Searcher searcher = new IndexSearcher(directory);
Searcher searcher = new IndexSearcher(directory, true);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));

View File

@ -92,7 +92,7 @@ public class TestPositionIncrement extends BaseTokenStreamTestCase {
writer.close();
IndexSearcher searcher = new IndexSearcher(store);
IndexSearcher searcher = new IndexSearcher(store, true);
TermPositions pos = searcher.getIndexReader().termPositions(new Term("field", "1"));
pos.next();

View File

@ -48,7 +48,7 @@ public class TestPrefixFilter extends LuceneTestCase {
// PrefixFilter combined with ConstantScoreQuery
PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
Query query = new ConstantScoreQuery(filter);
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(4, hits.length);

View File

@ -45,7 +45,7 @@ public class TestPrefixQuery extends LuceneTestCase {
writer.close();
PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
IndexSearcher searcher = new IndexSearcher(directory);
IndexSearcher searcher = new IndexSearcher(directory, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("All documents in /Computers category and below", 3, hits.length);

View File

@ -1,182 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.ConcurrentModificationException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
/**
* Test Hits searches with interleaved deletions.
*
* See {@link http://issues.apache.org/jira/browse/LUCENE-1096}.
* @deprecated Hits will be removed in Lucene 3.0
*/
public class TestSearchHitsWithDeletions extends LuceneTestCase {
private static boolean VERBOSE = false;
private static final String TEXT_FIELD = "text";
private static final int N = 16100;
private static Directory directory;
public void setUp() throws Exception {
super.setUp();
// Create an index writer.
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i=0; i<N; i++) {
writer.addDocument(createDocument(i));
}
writer.optimize();
writer.close();
}
/**
* Deletions during search should not alter previously retrieved hits.
*/
public void testSearchHitsDeleteAll() throws Exception {
doTestSearchHitsDeleteEvery(1,false);
}
/**
* Deletions during search should not alter previously retrieved hits.
*/
public void testSearchHitsDeleteEvery2ndHit() throws Exception {
doTestSearchHitsDeleteEvery(2,false);
}
/**
* Deletions during search should not alter previously retrieved hits.
*/
public void testSearchHitsDeleteEvery4thHit() throws Exception {
doTestSearchHitsDeleteEvery(4,false);
}
/**
* Deletions during search should not alter previously retrieved hits.
*/
public void testSearchHitsDeleteEvery8thHit() throws Exception {
doTestSearchHitsDeleteEvery(8,false);
}
/**
* Deletions during search should not alter previously retrieved hits.
*/
public void testSearchHitsDeleteEvery90thHit() throws Exception {
doTestSearchHitsDeleteEvery(90,false);
}
/**
* Deletions during search should not alter previously retrieved hits,
* and deletions that affect total number of hits should throw the
* correct exception when trying to fetch "too many".
*/
public void testSearchHitsDeleteEvery8thHitAndInAdvance() throws Exception {
doTestSearchHitsDeleteEvery(8,true);
}
/**
* Verify that all is ok also with no deletions at all.
*/
public void testSearchHitsNoDeletes() throws Exception {
doTestSearchHitsDeleteEvery(N+100,false);
}
/**
* Deletions that affect total number of hits should throw the
* correct exception when trying to fetch "too many".
*/
public void testSearchHitsDeleteInAdvance() throws Exception {
doTestSearchHitsDeleteEvery(N+100,true);
}
/**
* Intermittent deletions during search should not alter previously retrieved hits.
* (Using a debugger to verify that the check in Hits is performed only
*/
public void testSearchHitsDeleteIntermittent() throws Exception {
doTestSearchHitsDeleteEvery(-1,false);
}
private void doTestSearchHitsDeleteEvery(int k, boolean deleteInFront) throws Exception {
boolean intermittent = k<0;
log("Test search hits with "+(intermittent ? "intermittent deletions." : "deletions of every "+k+" hit."));
IndexSearcher searcher = new IndexSearcher(directory);
IndexReader reader = searcher.getIndexReader();
Query q = new TermQuery(new Term(TEXT_FIELD,"text")); // matching all docs
Hits hits = searcher.search(q);
log("Got "+hits.length()+" results");
assertEquals("must match all "+N+" docs, not only "+hits.length()+" docs!",N,hits.length());
if (deleteInFront) {
log("deleting hits that was not yet retrieved!");
reader.deleteDocument(reader.maxDoc()-1);
reader.deleteDocument(reader.maxDoc()-2);
reader.deleteDocument(reader.maxDoc()-3);
}
try {
for (int i = 0; i < hits.length(); i++) {
int id = hits.id(i);
assertEquals("Hit "+i+" has doc id "+hits.id(i)+" instead of "+i,i,hits.id(i));
if ((intermittent && (i==50 || i==250 || i==950)) || //100-yes, 200-no, 400-yes, 800-no, 1600-yes
(!intermittent && (k<2 || (i>0 && i%k==0)))) {
Document doc = hits.doc(id);
log("Deleting hit "+i+" - doc "+doc+" with id "+id);
reader.deleteDocument(id);
}
if (intermittent) {
// check internal behavior of Hits (go 50 ahead of getMoreDocs points because the deletions cause Hits to use more of the available hits)
if (i==150 || i==450 || i==1650) {
assertTrue("Hit "+i+": hits should have checked for deletions in last call to getMoreDocs()",hits.debugCheckedForDeletions);
} else if (i==50 || i==250 || i==850) {
assertFalse("Hit "+i+": hits should have NOT checked for deletions in last call to getMoreDocs()",hits.debugCheckedForDeletions);
}
}
}
} catch (ConcurrentModificationException e) {
// this is the only valid exception, and only when deleting in front.
assertTrue(e.getMessage()+" not expected unless deleting hits that were not yet seen!",deleteInFront);
}
searcher.close();
}
private static Document createDocument(int id) {
Document doc = new Document();
doc.add(new Field(TEXT_FIELD, "text of document"+id, Field.Store.YES, Field.Index.ANALYZED));
return doc;
}
private static void log (String s) {
if (VERBOSE) {
System.out.println(s);
}
}
}

View File

@ -52,7 +52,7 @@ public class TestSetNorm extends LuceneTestCase {
writer.close();
// reset the boost of each instance of this document
IndexReader reader = IndexReader.open(store);
IndexReader reader = IndexReader.open(store, false);
reader.setNorm(0, "field", 1.0f);
reader.setNorm(1, "field", 2.0f);
reader.setNorm(2, "field", 4.0f);
@ -62,7 +62,7 @@ public class TestSetNorm extends LuceneTestCase {
// check that searches are ordered by this boost
final float[] scores = new float[4];
new IndexSearcher(store).search
new IndexSearcher(store, true).search
(new TermQuery(new Term("field", "word")),
new Collector() {
private int base = 0;

View File

@ -67,7 +67,7 @@ public class TestSimilarity extends LuceneTestCase {
writer.optimize();
writer.close();
Searcher searcher = new IndexSearcher(store);
Searcher searcher = new IndexSearcher(store, true);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("field", "a");

View File

@ -331,8 +331,8 @@ public class TestSimpleExplanations extends TestExplanations {
Query query = parser.parse("handle:1");
Searcher[] searchers = new Searcher[2];
searchers[0] = new IndexSearcher(indexStoreB);
searchers[1] = new IndexSearcher(indexStoreA);
searchers[0] = new IndexSearcher(indexStoreB, true);
searchers[1] = new IndexSearcher(indexStoreA, true);
Searcher mSearcher = new MultiSearcher(searchers);
ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;

View File

@ -122,7 +122,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(ramDir);
IndexSearcher searcher = new IndexSearcher(ramDir, true);
TopDocs td = searcher.search(query,null,10);
//System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore());
assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", expectedNumResults, td.totalHits);

View File

@ -624,19 +624,6 @@ public class TestSort extends LuceneTestCase implements Serializable {
sort.setSort (new SortField ("i18n", new Locale("da", "dk")));
assertMatches (multiSearcher, queryY, sort, "BJDHF");
}
// test a custom sort function
public void testCustomSorts() throws Exception {
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource()));
assertMatches (full, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true));
assertMatches (full, queryY, sort, "HJDBF");
SortComparator custom = SampleComparable.getComparator();
sort.setSort (new SortField ("custom", custom));
assertMatches (full, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", custom, true));
assertMatches (full, queryY, sort, "HJDBF");
}
// test a variety of sorts using more than one searcher
public void testMultiSort() throws Exception {

View File

@ -50,7 +50,7 @@ public class TestSpanQueryFilter extends LuceneTestCase {
}
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim()));
SpanQueryFilter filter = new SpanQueryFilter(query);

View File

@ -165,20 +165,20 @@ public class TestStressSort extends LuceneTestCase {
}
}
writer.close();
searcherMultiSegment = new IndexSearcher(dir);
searcherMultiSegment = new IndexSearcher(dir, true);
searcherMultiSegment.setDefaultFieldSortScoring(true, true);
dir2 = new MockRAMDirectory(dir);
writer = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
searcherSingleSegment = new IndexSearcher(dir2);
searcherSingleSegment = new IndexSearcher(dir2, true);
searcherSingleSegment.setDefaultFieldSortScoring(true, true);
dir3 = new MockRAMDirectory(dir);
writer = new IndexWriter(dir3, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.optimize(3);
writer.close();
searcherFewSegment = new IndexSearcher(dir3);
searcherFewSegment = new IndexSearcher(dir3, true);
searcherFewSegment.setDefaultFieldSortScoring(true, true);
}

View File

@ -49,7 +49,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterId() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@ -131,7 +131,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterIdCollating() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(Locale.ENGLISH);
@ -214,7 +214,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterRand() throws IOException {
IndexReader reader = IndexReader.open(signedIndex.index);
IndexReader reader = IndexReader.open(signedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
String minRP = pad(signedIndex.minR);
@ -277,7 +277,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
public void testRangeFilterRandCollating() throws IOException {
// using the unsigned index because collation seems to ignore hyphens
IndexReader reader = IndexReader.open(unsignedIndex.index);
IndexReader reader = IndexReader.open(unsignedIndex.index, true);
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(Locale.ENGLISH);
@ -354,7 +354,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(farsiIndex);
IndexReader reader = IndexReader.open(farsiIndex, true);
IndexSearcher search = new IndexSearcher(reader);
Query q = new TermQuery(new Term("body","body"));
@ -398,7 +398,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
writer.optimize();
writer.close();
IndexReader reader = IndexReader.open(danishIndex);
IndexReader reader = IndexReader.open(danishIndex, true);
IndexSearcher search = new IndexSearcher(reader);
Query q = new TermQuery(new Term("body","body"));

View File

@ -48,19 +48,19 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testExclusive() throws Exception {
Query query = new TermRangeQuery("content", "A", "C", false, false);
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 1, hits.length);
searcher.close();
initializeIndex(new String[] {"A", "B", "D"});
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D, only B in range", 1, hits.length);
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added, still only B in range", 1, hits.length);
searcher.close();
@ -70,14 +70,14 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testDeprecatedCstrctors() throws IOException {
Query query = new RangeQuery(null, new Term("content","C"), false);
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 2, hits.length);
searcher.close();
query = new RangeQuery(new Term("content","C"),null, false);
initializeIndex(new String[] {"A", "B", "C", "D"});
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 1, hits.length);
searcher.close();
@ -87,19 +87,19 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = new TermRangeQuery("content", "A", "C", true, true);
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
searcher.close();
initializeIndex(new String[]{"A", "B", "D"});
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D - A and B in range", 2, hits.length);
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added - A, B, C in range", 3, hits.length);
searcher.close();
@ -154,19 +154,19 @@ public class TestTermRangeQuery extends LuceneTestCase {
public void testExclusiveCollating() throws Exception {
Query query = new TermRangeQuery("content", "A", "C", false, false, Collator.getInstance(Locale.ENGLISH));
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D, only B in range", 1, hits.length);
searcher.close();
initializeIndex(new String[] {"A", "B", "D"});
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D, only B in range", 1, hits.length);
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added, still only B in range", 1, hits.length);
searcher.close();
@ -176,19 +176,19 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = new TermRangeQuery("content", "A", "C",true, true, Collator.getInstance(Locale.ENGLISH));
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,C,D - A,B,C in range", 3, hits.length);
searcher.close();
initializeIndex(new String[]{"A", "B", "D"});
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("A,B,D - A and B in range", 2, hits.length);
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("C added - A, B, C in range", 3, hits.length);
searcher.close();
@ -205,7 +205,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
// index Term below should NOT be returned by a TermRangeQuery with a Farsi
// Collator (or an Arabic one for the case when Farsi is not supported).
initializeIndex(new String[]{ "\u0633\u0627\u0628"});
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, hits.length);
@ -225,7 +225,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
initializeIndex(words);
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, hits.length);
@@ -318,7 +318,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
Query query = new TermRangeQuery("content", null, "C",
false, false);
initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer);
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
Hits hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 3, hits.length());
@@ -327,7 +327,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
searcher.close();
initializeIndex(new String[] {"A", "B", "", "D"}, analyzer);
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 3, hits.length());
@@ -335,7 +335,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
//assertEquals("A,B,<empty string>,D => A, B & <empty string> are in range", 2, hits.length());
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added, still A, B & <empty string> are in range", 3, hits.length());
@@ -350,7 +350,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
Analyzer analyzer = new SingleCharAnalyzer();
Query query = new TermRangeQuery("content", null, "C", true, true);
initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer);
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
Hits hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 4, hits.length());
@@ -358,7 +358,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
//assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 3, hits.length());
searcher.close();
initializeIndex(new String[]{"A", "B", "", "D"}, analyzer);
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("A,B,<empty string>,D - A, B and <empty string> in range", 3, hits.length());
@@ -366,7 +366,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
//assertEquals("A,B,<empty string>,D => A, B and <empty string> in range", 2, hits.length());
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
searcher = new IndexSearcher(dir, true);
hits = searcher.search(query);
// When Lucene-38 is fixed, use the assert on the next line:
assertEquals("C added => A,B,<empty string>,C in range", 4, hits.length());

View File

@@ -58,7 +58,7 @@ public class TestTermScorer extends LuceneTestCase
writer.addDocument(doc);
}
writer.close();
indexSearcher = new IndexSearcher(directory);
indexSearcher = new IndexSearcher(directory, false);
indexReader = indexSearcher.getIndexReader();

View File

@@ -67,7 +67,7 @@ public class TestTermVectors extends LuceneTestCase {
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
}
public void test() {
@@ -101,7 +101,7 @@ public class TestTermVectors extends LuceneTestCase {
doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
IndexReader reader = IndexReader.open(dir, true);
TermFreqVector[] v = reader.getTermFreqVectors(0);
assertEquals(4, v.length);
String[] expectedFields = new String[]{"a", "b", "c", "x"};
@@ -240,7 +240,7 @@ public class TestTermVectors extends LuceneTestCase {
writer.addDocument(testDoc3);
writer.addDocument(testDoc4);
writer.close();
IndexSearcher knownSearcher = new IndexSearcher(dir);
IndexSearcher knownSearcher = new IndexSearcher(dir, true);
TermEnum termEnum = knownSearcher.reader.terms();
TermDocs termDocs = knownSearcher.reader.termDocs();
//System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
@@ -366,7 +366,7 @@ public class TestTermVectors extends LuceneTestCase {
}
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
Query query = new TermQuery(new Term("field", "hundred"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -398,7 +398,7 @@ public class TestTermVectors extends LuceneTestCase {
writer.addDocument(doc);
writer.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
Query query = new TermQuery(new Term("field", "one"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

View File

@@ -151,7 +151,7 @@ public class TestThreadSafe extends LuceneTestCase {
// do many small tests so the thread locals go away in between
for (int i=0; i<100; i++) {
ir1 = IndexReader.open(dir1);
ir1 = IndexReader.open(dir1, false);
doTest(10,100);
}
}

View File

@@ -1,328 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.util.BitSet;
/**
* Tests the TimeLimitedCollector. This test checks (1) search
* correctness (regardless of timeout), (2) expected timeout behavior,
* and (3) a sanity test with multiple searching threads.
*/
public class TestTimeLimitedCollector extends LuceneTestCase {
private static final int SLOW_DOWN = 47;
private static final long TIME_ALLOWED = 17 * SLOW_DOWN; // so searches can find about 17 docs.
// max time allowed is relaxed for multithreading tests.
// the multithreaded case fails when this is set to 1 (no slack) and many threads (>2000) are launched,
// but that is not a real failure, just noise.
private static final double MULTI_THREAD_SLACK = 7;
private static final int N_DOCS = 3000;
private static final int N_THREADS = 50;
private Searcher searcher;
private final String FIELD_NAME = "body";
private Query query;
public TestTimeLimitedCollector(String name) {
super(name);
}
/**
* initializes searcher with a document set
*/
protected void setUp() throws Exception {
super.setUp();
final String docText[] = {
"docThatNeverMatchesSoWeCanRequireLastDocCollectedToBeGreaterThanZero",
"one blah three",
"one foo three multiOne",
"one foobar three multiThree",
"blueberry pancakes",
"blueberry pie",
"blueberry strudel",
"blueberry pizza",
};
Directory directory = new RAMDirectory();
IndexWriter iw = new IndexWriter(directory, new WhitespaceAnalyzer(), true, MaxFieldLength.UNLIMITED);
for (int i=0; i<N_DOCS; i++) {
add(docText[i%docText.length], iw);
}
iw.close();
searcher = new IndexSearcher(directory);
String qtxt = "one";
for (int i = 0; i < docText.length; i++) {
qtxt += ' ' + docText[i]; // large query so that search will be longer
}
QueryParser queryParser = new QueryParser(FIELD_NAME, new WhitespaceAnalyzer());
query = queryParser.parse(qtxt);
// warm the searcher
searcher.search(query, null, 1000);
}
public void tearDown() throws Exception {
searcher.close();
super.tearDown();
}
private void add(String value, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field(FIELD_NAME, value, Field.Store.NO, Field.Index.ANALYZED));
iw.addDocument(d);
}
private void search(HitCollector collector) throws Exception {
searcher.search(query, collector);
}
/**
* test search correctness with no timeout
*/
public void testSearch() {
doTestSearch();
}
private void doTestSearch() {
int totalResults = 0;
int totalTLCResults = 0;
try {
MyHitCollector myHc = new MyHitCollector();
search(myHc);
totalResults = myHc.hitCount();
myHc = new MyHitCollector();
long oneHour = 3600000;
HitCollector tlCollector = createTimedCollector(myHc, oneHour, false);
search(tlCollector);
totalTLCResults = myHc.hitCount();
} catch (Exception e) {
e.printStackTrace();
assertTrue("Unexpected exception: "+e, false); //==fail
}
assertEquals( "Wrong number of results!", totalResults, totalTLCResults );
}
private HitCollector createTimedCollector(MyHitCollector hc, long timeAllowed, boolean greedy) {
TimeLimitedCollector res = new TimeLimitedCollector(hc, timeAllowed);
res.setGreedy(greedy); // set to true to make sure at least one doc is collected.
return res;
}
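// Note on the greedy flag (descriptive comment; semantics inferred from the assertions in
// doTestTimeout below): when greedy, the doc that was in flight at the moment the time limit
// expired is still passed to the wrapped collector before the exception is thrown, so the
// exception's getLastDocCollected() equals the inner collector's last doc; when not greedy,
// that final doc is dropped and the exception's doc is strictly greater.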
/**
* Test that a timeout occurs soon enough when the collector is greedy.
*/
public void testTimeoutGreedy() {
doTestTimeout(false, true);
}
/**
* Test that a timeout occurs soon enough when the collector is not greedy.
*/
public void testTimeoutNotGreedy() {
doTestTimeout(false, false);
}
private void doTestTimeout(boolean multiThreaded, boolean greedy) {
// setup
MyHitCollector myHc = new MyHitCollector();
myHc.setSlowDown(SLOW_DOWN);
HitCollector tlCollector = createTimedCollector(myHc, TIME_ALLOWED, greedy);
// search
TimeLimitedCollector.TimeExceededException timeoutException = null;
try {
search(tlCollector);
} catch (TimeLimitedCollector.TimeExceededException x) {
timeoutException = x;
} catch (Exception e) {
assertTrue("Unexpected exception: "+e, false); //==fail
}
// must get exception
assertNotNull( "Timeout expected!", timeoutException );
// greediness affects the last doc collected
int exceptionDoc = timeoutException.getLastDocCollected();
int lastCollected = myHc.getLastDocCollected();
assertTrue( "doc collected at timeout must be > 0!", exceptionDoc > 0 );
if (greedy) {
assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" != lastCollected="+lastCollected, exceptionDoc==lastCollected);
assertTrue("greedy, but no hits found!", myHc.hitCount() > 0 );
} else {
assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" not > lastCollected="+lastCollected, exceptionDoc>lastCollected);
}
// verify that elapsed time at exception is within valid limits
assertEquals( TIME_ALLOWED, timeoutException.getTimeAllowed() );
// a) Not too early
assertTrue ( "elapsed="+timeoutException.getTimeElapsed()+" <= (allowed-resolution)="+(TIME_ALLOWED-TimeLimitedCollector.getResolution()),
timeoutException.getTimeElapsed() > TIME_ALLOWED-TimeLimitedCollector.getResolution());
// b) Not too late.
// This part is problematic in a busy test system, so we just print a warning.
// We already verified that a timeout occurred, we just can't be picky about how long it took.
if (timeoutException.getTimeElapsed() > maxTime(multiThreaded)) {
System.out.println("Informative: timeout exceeded (no action required: most probably just " +
" because the test machine is slower than usual): " +
"lastDoc="+exceptionDoc+
" ,&& allowed="+timoutException.getTimeAllowed() +
" ,&& elapsed="+timoutException.getTimeElapsed() +
" >= " + maxTimeStr(multiThreaded));
}
}
private long maxTime(boolean multiThreaded) {
long res = 2 * TimeLimitedCollector.getResolution() + TIME_ALLOWED + SLOW_DOWN; // some slack for less noise in this test
if (multiThreaded) {
res *= MULTI_THREAD_SLACK; // larger slack
}
return res;
}
private String maxTimeStr(boolean multiThreaded) {
String s =
"( " +
"2*resolution + TIME_ALLOWED + SLOW_DOWN = " +
"2*" + TimeLimitedCollector.getResolution() + " + " + TIME_ALLOWED + " + " + SLOW_DOWN +
")";
if (multiThreaded) {
s = MULTI_THREAD_SLACK + " * "+s;
}
return maxTime(multiThreaded) + " = " + s;
}
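// Worked example of the budget above (a sketch, assuming the default 20 ms resolution that
// testModifyResolution() below also implies via its "//400" note): TIME_ALLOWED = 17 * 47 = 799 ms,
// so maxTime(false) = 2*20 + 799 + 47 = 886 ms, and maxTime(true) = 7 * 886 = 6202 ms.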
/**
* Test timeout behavior when resolution is modified.
*/
public void testModifyResolution() {
try {
// increase and test
long resolution = 20 * TimeLimitedCollector.DEFAULT_RESOLUTION; //400
TimeLimitedCollector.setResolution(resolution);
assertEquals(resolution, TimeLimitedCollector.getResolution());
doTestTimeout(false,true);
// decrease much and test
resolution = 5;
TimeLimitedCollector.setResolution(resolution);
assertEquals(resolution, TimeLimitedCollector.getResolution());
doTestTimeout(false,true);
// return to default and test
resolution = TimeLimitedCollector.DEFAULT_RESOLUTION;
TimeLimitedCollector.setResolution(resolution);
assertEquals(resolution, TimeLimitedCollector.getResolution());
doTestTimeout(false,true);
} finally {
TimeLimitedCollector.setResolution(TimeLimitedCollector.DEFAULT_RESOLUTION);
}
}
/**
* Test correctness with multiple searching threads.
*/
public void testSearchMultiThreaded() throws Exception {
doTestMultiThreads(false);
}
/**
* Test correctness with multiple searching threads.
*/
public void testTimeoutMultiThreaded() throws Exception {
doTestMultiThreads(true);
}
private void doTestMultiThreads(final boolean withTimeout) throws Exception {
Thread [] threadArray = new Thread[N_THREADS];
final BitSet success = new BitSet(N_THREADS);
for( int i = 0; i < threadArray.length; ++i ) {
final int num = i;
threadArray[num] = new Thread() {
public void run() {
if (withTimeout) {
doTestTimeout(true,true);
} else {
doTestSearch();
}
synchronized(success) {
success.set(num);
}
}
};
}
for( int i = 0; i < threadArray.length; ++i ) {
threadArray[i].start();
}
for( int i = 0; i < threadArray.length; ++i ) {
threadArray[i].join();
}
assertEquals("some threads failed!", N_THREADS,success.cardinality());
}
// counting hit collector that can slow down at collect().
private class MyHitCollector extends HitCollector
{
private final BitSet bits = new BitSet();
private int slowdown = 0;
private int lastDocCollected = -1;
/**
* amount of time to wait on each collect to simulate a long iteration
*/
public void setSlowDown( int milliseconds ) {
slowdown = milliseconds;
}
public void collect( final int docId, final float score ) {
if( slowdown > 0 ) {
try {
Thread.sleep(slowdown);
} catch (InterruptedException ie) {
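// restore the thread's interrupt status before rethrowing, so callers can still observe it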
Thread.currentThread().interrupt();
throw new RuntimeException(ie);
}
}
assert docId >= 0: " doc=" + docId;
bits.set( docId );
lastDocCollected = docId;
}
public int hitCount() {
return bits.cardinality();
}
public int getLastDocCollected() {
return lastDocCollected;
}
}
}

View File

@@ -79,7 +79,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
add(docText[i%docText.length], iw);
}
iw.close();
searcher = new IndexSearcher(directory);
searcher = new IndexSearcher(directory, true);
String qtxt = "one";
for (int i = 0; i < docText.length; i++) {

View File

@@ -91,7 +91,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
private TopDocsCollector doSearch(int numResults) throws IOException {
Query q = new MatchAllDocsQuery();
IndexSearcher searcher = new IndexSearcher(dir);
IndexSearcher searcher = new IndexSearcher(dir, true);
TopDocsCollector tdc = new MyTopsDocCollector(numResults);
searcher.search(q, tdc);
searcher.close();

Some files were not shown because too many files have changed in this diff