From 6cf3fb071a8a89e2400f534fb59ab0784c37eb99 Mon Sep 17 00:00:00 2001 From: Mark Robert Miller Date: Thu, 24 Sep 2009 19:33:28 +0000 Subject: [PATCH 1/4] Starting Lucene 2.9 branch. git-svn-id: https://svn.apache.org/repos/asf/lucene/java/branches/lucene_2_9@818600 13f79535-47bb-0310-9956-ffa450edef68 From aed1f31fd6d38b6684f552063fde288f7419c2b0 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Wed, 30 Sep 2009 16:14:50 +0000 Subject: [PATCH 2/4] Create a new branch for 2.9 backwards tests. This branch (with deprecated tests removed soon) should be compiled with java 1.4 by trunk and tests should pass with Java-1.5-generics-enabled trunk Lucene git-svn-id: https://svn.apache.org/repos/asf/lucene/java/branches/lucene_2_9_back_compat_tests@820331 13f79535-47bb-0310-9956-ffa450edef68 From ce7761cb296197b6b890c2239a18658061315ddb Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Wed, 30 Sep 2009 17:35:06 +0000 Subject: [PATCH 3/4] disable the checkindex version test in backwards git-svn-id: https://svn.apache.org/repos/asf/lucene/java/branches/lucene_2_9_back_compat_tests@820359 13f79535-47bb-0310-9956-ffa450edef68 --- src/test/org/apache/lucene/index/TestCheckIndex.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/test/org/apache/lucene/index/TestCheckIndex.java b/src/test/org/apache/lucene/index/TestCheckIndex.java index 52fb5e4a3d4..78ec8b0b43b 100644 --- a/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -90,6 +90,7 @@ public class TestCheckIndex extends LuceneTestCase { assertTrue(checker.checkIndex(onlySegments).clean == true); } + /* Does not work, because compilation puts final field from Constants of 2.9 into class file: public void testLuceneConstantVersion() throws IOException { // common-build.xml sets lucene.version final String version = System.getProperty("lucene.version"); @@ -97,5 +98,5 @@ public class TestCheckIndex extends LuceneTestCase { assertTrue(version.equals(Constants.LUCENE_MAIN_VERSION+"-dev") || version.equals(Constants.LUCENE_MAIN_VERSION)); assertTrue(Constants.LUCENE_VERSION.startsWith(version)); - } + }*/ } From 435a29d37d613ffbaffa3206bff99ccedae8e86a Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Sat, 3 Oct 2009 15:04:01 +0000 Subject: [PATCH 4/4] fix deprecated tests #1 git-svn-id: https://svn.apache.org/repos/asf/lucene/java/branches/lucene_2_9_back_compat_tests@821335 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/lucene/TestHitIterator.java | 85 --- src/test/org/apache/lucene/TestSearch.java | 2 +- .../lucene/TestSearchForDuplicates.java | 4 +- .../analysis/BaseTokenStreamTestCase.java | 38 +- .../analysis/TestCachingTokenFilter.java | 2 +- .../lucene/analysis/TestCharacterCache.java | 34 -- .../analysis/TestISOLatin1AccentFilter.java | 111 ---- .../lucene/analysis/TestKeywordAnalyzer.java | 4 +- .../lucene/analysis/TestTeeTokenFilter.java | 258 --------- .../analysis/TestTokenStreamBWComp.java | 393 ------------- .../lucene/document/TestBinaryDocument.java | 4 +- .../apache/lucene/document/TestDateTools.java | 198 ------- .../apache/lucene/document/TestDocument.java | 4 +- .../lucene/document/TestNumberTools.java | 82 --- .../index/TestAddIndexesNoOptimize.java | 10 +- .../apache/lucene/index/TestAtomicUpdate.java | 4 +- .../index/TestBackwardsCompatibility.java | 543 ------------------ .../apache/lucene/index/TestCheckIndex.java | 2 +- .../index/TestConcurrentMergeScheduler.java | 6 +- .../org/apache/lucene/index/TestCrash.java | 18 +- 
.../lucene/index/TestDeletionPolicy.java | 34 +- .../lucene/index/TestDirectoryReader.java | 8 +- .../lucene/index/TestDocumentWriter.java | 2 +- .../apache/lucene/index/TestFieldsReader.java | 2 +- .../lucene/index/TestFilterIndexReader.java | 2 +- .../lucene/index/TestIndexFileDeleter.java | 2 +- .../lucene/index/TestIndexModifier.java | 281 --------- .../apache/lucene/index/TestIndexReader.java | 148 ++--- .../lucene/index/TestIndexReaderClone.java | 56 +- .../index/TestIndexReaderCloneNorms.java | 10 +- .../lucene/index/TestIndexReaderReopen.java | 144 ++--- .../apache/lucene/index/TestIndexWriter.java | 258 ++++----- .../lucene/index/TestIndexWriterDelete.java | 30 +- .../index/TestIndexWriterExceptions.java | 4 +- .../index/TestIndexWriterMergePolicy.java | 2 +- .../lucene/index/TestIndexWriterMerging.java | 2 +- .../lucene/index/TestIndexWriterReader.java | 8 +- .../org/apache/lucene/index/TestLazyBug.java | 2 +- .../lucene/index/TestLazyProxSkipping.java | 2 +- .../org/apache/lucene/index/TestNorms.java | 4 +- .../org/apache/lucene/index/TestOmitTf.java | 2 +- .../lucene/index/TestParallelReader.java | 42 +- .../lucene/index/TestParallelTermEnum.java | 4 +- .../org/apache/lucene/index/TestPayloads.java | 6 +- .../lucene/index/TestSegmentTermEnum.java | 2 +- .../lucene/index/TestStressIndexing.java | 2 +- .../lucene/index/TestStressIndexing2.java | 6 +- .../lucene/index/TestTermVectorsReader.java | 2 +- .../apache/lucene/index/TestTermdocPerf.java | 2 +- .../lucene/index/TestThreadedOptimize.java | 2 +- .../lucene/index/TestTransactionRollback.java | 4 +- .../apache/lucene/index/TestTransactions.java | 4 +- .../TestMultiFieldQueryParser.java | 2 +- .../lucene/queryParser/TestQueryParser.java | 112 +--- .../org/apache/lucene/search/QueryUtils.java | 40 +- .../lucene/search/SampleComparable.java | 149 ----- .../apache/lucene/search/TestBoolean2.java | 2 +- .../search/TestBooleanMinShouldMatch.java | 2 +- .../apache/lucene/search/TestBooleanOr.java | 2 +- .../lucene/search/TestBooleanPrefixQuery.java | 2 +- .../lucene/search/TestBooleanScorer.java | 2 +- .../search/TestCachingWrapperFilter.java | 4 +- .../apache/lucene/search/TestDateFilter.java | 4 +- .../apache/lucene/search/TestDateSort.java | 2 +- .../search/TestDisjunctionMaxQuery.java | 2 +- .../apache/lucene/search/TestDocBoost.java | 2 +- .../lucene/search/TestExplanations.java | 2 +- .../apache/lucene/search/TestFieldCache.java | 2 +- .../search/TestFieldCacheRangeFilter.java | 16 +- .../search/TestFieldCacheTermsFilter.java | 2 +- .../lucene/search/TestFilteredSearch.java | 2 +- .../apache/lucene/search/TestFuzzyQuery.java | 6 +- .../lucene/search/TestMatchAllDocsQuery.java | 2 +- .../lucene/search/TestMultiPhraseQuery.java | 8 +- .../lucene/search/TestMultiSearcher.java | 26 +- .../search/TestMultiSearcherRanking.java | 6 +- .../search/TestMultiTermConstantScore.java | 18 +- .../search/TestMultiThreadTermVectors.java | 2 +- .../org/apache/lucene/search/TestNot.java | 2 +- .../lucene/search/TestPhrasePrefixQuery.java | 4 +- .../apache/lucene/search/TestPhraseQuery.java | 10 +- .../lucene/search/TestPositionIncrement.java | 2 +- .../lucene/search/TestPrefixFilter.java | 2 +- .../apache/lucene/search/TestPrefixQuery.java | 2 +- .../search/TestSearchHitsWithDeletions.java | 182 ------ .../org/apache/lucene/search/TestSetNorm.java | 4 +- .../apache/lucene/search/TestSimilarity.java | 2 +- .../lucene/search/TestSimpleExplanations.java | 4 +- .../lucene/search/TestSloppyPhraseQuery.java | 2 +- 
.../org/apache/lucene/search/TestSort.java | 13 - .../lucene/search/TestSpanQueryFilter.java | 2 +- .../apache/lucene/search/TestStressSort.java | 6 +- .../lucene/search/TestTermRangeFilter.java | 12 +- .../lucene/search/TestTermRangeQuery.java | 44 +- .../apache/lucene/search/TestTermScorer.java | 2 +- .../apache/lucene/search/TestTermVectors.java | 10 +- .../apache/lucene/search/TestThreadSafe.java | 2 +- .../search/TestTimeLimitedCollector.java | 328 ----------- .../search/TestTimeLimitingCollector.java | 2 +- .../lucene/search/TestTopDocsCollector.java | 2 +- .../search/TestTopScoreDocCollector.java | 2 +- .../apache/lucene/search/TestWildcard.java | 8 +- .../search/function/TestCustomScoreQuery.java | 2 +- .../search/function/TestFieldScoreQuery.java | 8 +- .../lucene/search/function/TestOrdValues.java | 8 +- .../lucene/search/payloads/PayloadHelper.java | 2 +- .../payloads/TestBoostingTermQuery.java | 245 -------- .../lucene/search/spans/TestBasics.java | 2 +- .../spans/TestFieldMaskingSpanQuery.java | 2 +- .../search/spans/TestNearSpansOrdered.java | 2 +- .../lucene/search/spans/TestPayloadSpans.java | 12 +- .../apache/lucene/search/spans/TestSpans.java | 4 +- .../search/spans/TestSpansAdvanced.java | 2 +- .../search/spans/TestSpansAdvanced2.java | 4 +- .../apache/lucene/store/MockRAMDirectory.java | 8 - .../lucene/store/TestBufferedIndexInput.java | 2 +- .../apache/lucene/store/TestLockFactory.java | 34 +- .../apache/lucene/store/TestRAMDirectory.java | 57 +- .../apache/lucene/store/TestWindowsMMap.java | 2 +- .../apache/lucene/util/LuceneTestCase.java | 6 - .../util/TestFieldCacheSanityChecker.java | 4 +- 121 files changed, 515 insertions(+), 3840 deletions(-) delete mode 100644 src/test/org/apache/lucene/TestHitIterator.java delete mode 100644 src/test/org/apache/lucene/analysis/TestCharacterCache.java delete mode 100644 src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java delete mode 100644 src/test/org/apache/lucene/analysis/TestTeeTokenFilter.java delete mode 100644 src/test/org/apache/lucene/analysis/TestTokenStreamBWComp.java delete mode 100644 src/test/org/apache/lucene/document/TestDateTools.java delete mode 100644 src/test/org/apache/lucene/document/TestNumberTools.java delete mode 100644 src/test/org/apache/lucene/index/TestBackwardsCompatibility.java delete mode 100644 src/test/org/apache/lucene/index/TestIndexModifier.java delete mode 100644 src/test/org/apache/lucene/search/SampleComparable.java delete mode 100644 src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java delete mode 100755 src/test/org/apache/lucene/search/TestTimeLimitedCollector.java delete mode 100644 src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java diff --git a/src/test/org/apache/lucene/TestHitIterator.java b/src/test/org/apache/lucene/TestHitIterator.java deleted file mode 100644 index a5a3d2e5243..00000000000 --- a/src/test/org/apache/lucene/TestHitIterator.java +++ /dev/null @@ -1,85 +0,0 @@ -package org.apache.lucene; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; -import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.Hits; -import org.apache.lucene.search.Hit; -import org.apache.lucene.search.HitIterator; - -import java.util.NoSuchElementException; - -/** - * This test intentionally not put in the search package in order - * to test HitIterator and Hit package protection. - * - * @deprecated Hits will be removed in Lucene 3.0 - */ -public class TestHitIterator extends LuceneTestCase { - public void testIterator() throws Exception { - RAMDirectory directory = new RAMDirectory(); - - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, - IndexWriter.MaxFieldLength.LIMITED); - Document doc = new Document(); - doc.add(new Field("field", "iterator test doc 1", Field.Store.YES, Field.Index.ANALYZED)); - writer.addDocument(doc); - - doc = new Document(); - doc.add(new Field("field", "iterator test doc 2", Field.Store.YES, Field.Index.ANALYZED)); - writer.addDocument(doc); - - writer.close(); - - _TestUtil.checkIndex(directory); - - IndexSearcher searcher = new IndexSearcher(directory); - Hits hits = searcher.search(new TermQuery(new Term("field", "iterator"))); - - HitIterator iterator = (HitIterator) hits.iterator(); - assertEquals(2, iterator.length()); - assertTrue(iterator.hasNext()); - Hit hit = (Hit) iterator.next(); - assertEquals("iterator test doc 1", hit.get("field")); - - assertTrue(iterator.hasNext()); - hit = (Hit) iterator.next(); - assertEquals("iterator test doc 2", hit.getDocument().get("field")); - - assertFalse(iterator.hasNext()); - - boolean caughtException = false; - try { - iterator.next(); - } catch (NoSuchElementException e) { - assertTrue(true); - caughtException = true; - } - - assertTrue(caughtException); - } -} diff --git a/src/test/org/apache/lucene/TestSearch.java b/src/test/org/apache/lucene/TestSearch.java index e3d09264622..7ccf4520dd0 100644 --- a/src/test/org/apache/lucene/TestSearch.java +++ b/src/test/org/apache/lucene/TestSearch.java @@ -98,7 +98,7 @@ public class TestSearch extends LuceneTestCase { } writer.close(); - Searcher searcher = new IndexSearcher(directory); + Searcher searcher = new IndexSearcher(directory, true); String[] queries = { "a b", diff --git a/src/test/org/apache/lucene/TestSearchForDuplicates.java b/src/test/org/apache/lucene/TestSearchForDuplicates.java index 4f4b555dd86..576c4ffd965 100644 --- a/src/test/org/apache/lucene/TestSearchForDuplicates.java +++ b/src/test/org/apache/lucene/TestSearchForDuplicates.java @@ -100,7 +100,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { writer.close(); // try a search without OR - Searcher searcher = new IndexSearcher(directory); + Searcher searcher = new 
IndexSearcher(directory, true); QueryParser parser = new QueryParser(PRIORITY_FIELD, analyzer); @@ -114,7 +114,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { searcher.close(); // try a new search with OR - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); hits = null; parser = new QueryParser(PRIORITY_FIELD, analyzer); diff --git a/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java b/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java index 452a0eb8cf4..4d48bce2cdc 100644 --- a/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java +++ b/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java @@ -32,56 +32,22 @@ import org.apache.lucene.util.LuceneTestCase; */ public abstract class BaseTokenStreamTestCase extends LuceneTestCase { - private boolean onlyUseNewAPI = false; - private final Set testWithNewAPI; - public BaseTokenStreamTestCase() { super(); - this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI } public BaseTokenStreamTestCase(String name) { super(name); - this.testWithNewAPI = null; // run all tests also with onlyUseNewAPI } + /** @deprecated */ public BaseTokenStreamTestCase(Set testWithNewAPI) { super(); - this.testWithNewAPI = testWithNewAPI; } + /** @deprecated */ public BaseTokenStreamTestCase(String name, Set testWithNewAPI) { super(name); - this.testWithNewAPI = testWithNewAPI; - } - - // @Override - protected void setUp() throws Exception { - super.setUp(); - TokenStream.setOnlyUseNewAPI(onlyUseNewAPI); - } - - // @Override - public void runBare() throws Throwable { - // Do the test with onlyUseNewAPI=false (default) - try { - onlyUseNewAPI = false; - super.runBare(); - } catch (Throwable e) { - System.out.println("Test failure of '"+getName()+"' occurred with onlyUseNewAPI=false"); - throw e; - } - - if (testWithNewAPI == null || testWithNewAPI.contains(getName())) { - // Do the test again with onlyUseNewAPI=true - try { - onlyUseNewAPI = true; - super.runBare(); - } catch (Throwable e) { - System.out.println("Test failure of '"+getName()+"' occurred with onlyUseNewAPI=true"); - throw e; - } - } } // some helpers to test Analyzers and TokenStreams: diff --git a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java index 22aa3827429..7c02b6ac283 100644 --- a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java @@ -70,7 +70,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase { writer.addDocument(doc); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1")); assertTrue(termPositions.next()); assertEquals(1, termPositions.freq()); diff --git a/src/test/org/apache/lucene/analysis/TestCharacterCache.java b/src/test/org/apache/lucene/analysis/TestCharacterCache.java deleted file mode 100644 index c65623bde58..00000000000 --- a/src/test/org/apache/lucene/analysis/TestCharacterCache.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.analysis; - -import org.apache.lucene.util.LuceneTestCase; -/** - * Testcase for {@link CharacterCache} - */ -public class TestCharacterCache extends LuceneTestCase { - - public void testValueOf() { - for (int i = 0; i < 256; i++) { - Character valueOf = CharacterCache.valueOf((char)i); - assertEquals((char)i, valueOf.charValue()); - } - - - } - -} diff --git a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java b/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java deleted file mode 100644 index 8a6b7c63dd0..00000000000 --- a/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java +++ /dev/null @@ -1,111 +0,0 @@ -package org.apache.lucene.analysis; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import org.apache.lucene.analysis.tokenattributes.TermAttribute; - -import java.io.StringReader; - -public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase { - public void testU() throws Exception { - TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl")); - ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream); - TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class); - assertTermEquals("Des", filter, termAtt); - assertTermEquals("mot", filter, termAtt); - assertTermEquals("cles", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("LA", filter, termAtt); - assertTermEquals("CHAINE", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("A", filter, termAtt); - assertTermEquals("AE", filter, termAtt); - assertTermEquals("C", filter, termAtt); - assertTermEquals("E", filter, termAtt); - assertTermEquals("E", filter, termAtt); - assertTermEquals("E", filter, termAtt); - assertTermEquals("E", filter, termAtt); - assertTermEquals("I", filter, termAtt); - assertTermEquals("I", filter, termAtt); - assertTermEquals("I", filter, termAtt); - assertTermEquals("I", filter, termAtt); - assertTermEquals("IJ", filter, termAtt); - assertTermEquals("D", filter, termAtt); - assertTermEquals("N", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("O", filter, termAtt); - assertTermEquals("OE", filter, termAtt); - assertTermEquals("TH", filter, termAtt); - assertTermEquals("U", filter, termAtt); - assertTermEquals("U", filter, termAtt); - assertTermEquals("U", filter, termAtt); - assertTermEquals("U", filter, termAtt); - assertTermEquals("Y", filter, termAtt); - assertTermEquals("Y", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("a", filter, termAtt); - assertTermEquals("ae", filter, termAtt); - assertTermEquals("c", filter, termAtt); - assertTermEquals("e", filter, termAtt); - assertTermEquals("e", filter, termAtt); - assertTermEquals("e", filter, termAtt); - assertTermEquals("e", filter, termAtt); - assertTermEquals("i", filter, termAtt); - assertTermEquals("i", filter, termAtt); - assertTermEquals("i", filter, termAtt); - assertTermEquals("i", filter, termAtt); - assertTermEquals("ij", filter, termAtt); - assertTermEquals("d", filter, termAtt); - assertTermEquals("n", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("o", filter, termAtt); - assertTermEquals("oe", filter, termAtt); - assertTermEquals("ss", filter, termAtt); - assertTermEquals("th", filter, termAtt); - assertTermEquals("u", filter, termAtt); - assertTermEquals("u", filter, termAtt); - assertTermEquals("u", 
filter, termAtt); - assertTermEquals("u", filter, termAtt); - assertTermEquals("y", filter, termAtt); - assertTermEquals("y", filter, termAtt); - assertTermEquals("fi", filter, termAtt); - assertTermEquals("fl", filter, termAtt); - assertFalse(filter.incrementToken()); - } - - void assertTermEquals(String expected, TokenStream stream, TermAttribute termAtt) throws Exception { - assertTrue(stream.incrementToken()); - assertEquals(expected, termAtt.term()); - } -} diff --git a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java index e19ab4f1a5f..02d11b9aa88 100644 --- a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java +++ b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java @@ -51,7 +51,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); } public void testPerFieldAnalyzer() throws Exception { @@ -78,7 +78,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { writer.addDocument(doc); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); TermDocs td = reader.termDocs(new Term("partnum", "Q36")); assertTrue(td.next()); td = reader.termDocs(new Term("partnum", "Q37")); diff --git a/src/test/org/apache/lucene/analysis/TestTeeTokenFilter.java b/src/test/org/apache/lucene/analysis/TestTeeTokenFilter.java deleted file mode 100644 index d4ddb28e395..00000000000 --- a/src/test/org/apache/lucene/analysis/TestTeeTokenFilter.java +++ /dev/null @@ -1,258 +0,0 @@ -package org.apache.lucene.analysis; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import org.apache.lucene.analysis.standard.StandardFilter; -import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.util.English; -import org.apache.lucene.util.LuceneTestCase; - -import java.io.IOException; -import java.io.StringReader; -import java.util.ArrayList; -import java.util.List; - -/** - * tests for the TeeTokenFilter and SinkTokenizer - */ -public class TestTeeTokenFilter extends LuceneTestCase { - protected StringBuffer buffer1; - protected StringBuffer buffer2; - protected String[] tokens1; - protected String[] tokens2; - - - public TestTeeTokenFilter(String s) { - super(s); - } - - protected void setUp() throws Exception { - super.setUp(); - tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"}; - tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"}; - buffer1 = new StringBuffer(); - - for (int i = 0; i < tokens1.length; i++) { - buffer1.append(tokens1[i]).append(' '); - } - buffer2 = new StringBuffer(); - for (int i = 0; i < tokens2.length; i++) { - buffer2.append(tokens2[i]).append(' '); - - } - } - - public void test() throws IOException { - - SinkTokenizer sink1 = new SinkTokenizer(null) { - public void add(Token t) { - if (t != null && t.term().equalsIgnoreCase("The")) { - super.add(t); - } - } - }; - TokenStream source = new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), sink1); - int i = 0; - final Token reusableToken = new Token(); - for (Token nextToken = source.next(reusableToken); nextToken != null; nextToken = source.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true); - i++; - } - assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length); - assertTrue("sink1 Size: " + sink1.getTokens().size() + " is not: " + 2, sink1.getTokens().size() == 2); - i = 0; - for (Token token = sink1.next(reusableToken); token != null; token = sink1.next(reusableToken)) { - assertTrue(token.term() + " is not equal to " + "The", token.term().equalsIgnoreCase("The") == true); - i++; - } - assertTrue(i + " does not equal: " + sink1.getTokens().size(), i == sink1.getTokens().size()); - } - - public void testMultipleSources() throws Exception { - SinkTokenizer theDetector = new SinkTokenizer(null) { - public void add(Token t) { - if (t != null && t.term().equalsIgnoreCase("The")) { - super.add(t); - } - } - }; - SinkTokenizer dogDetector = new SinkTokenizer(null) { - public void add(Token t) { - if (t != null && t.term().equalsIgnoreCase("Dogs")) { - super.add(t); - } - } - }; - TokenStream source1 = new CachingTokenFilter(new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), theDetector), dogDetector)); - TokenStream source2 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer2.toString())), theDetector), dogDetector); - int i = 0; - final Token reusableToken = new Token(); - for (Token nextToken = source1.next(reusableToken); nextToken != null; nextToken = source1.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true); - i++; - } - assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length); - assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 2, theDetector.getTokens().size() == 2); - assertTrue("dogDetector Size: " + 
dogDetector.getTokens().size() + " is not: " + 1, dogDetector.getTokens().size() == 1); - i = 0; - for (Token nextToken = source2.next(reusableToken); nextToken != null; nextToken = source2.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + tokens2[i], nextToken.term().equals(tokens2[i]) == true); - i++; - } - assertTrue(i + " does not equal: " + tokens2.length, i == tokens2.length); - assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 4, theDetector.getTokens().size() == 4); - assertTrue("dogDetector Size: " + dogDetector.getTokens().size() + " is not: " + 2, dogDetector.getTokens().size() == 2); - i = 0; - for (Token nextToken = theDetector.next(reusableToken); nextToken != null; nextToken = theDetector.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + "The", nextToken.term().equalsIgnoreCase("The") == true); - i++; - } - assertTrue(i + " does not equal: " + theDetector.getTokens().size(), i == theDetector.getTokens().size()); - i = 0; - for (Token nextToken = dogDetector.next(reusableToken); nextToken != null; nextToken = dogDetector.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + "Dogs", nextToken.term().equalsIgnoreCase("Dogs") == true); - i++; - } - assertTrue(i + " does not equal: " + dogDetector.getTokens().size(), i == dogDetector.getTokens().size()); - source1.reset(); - TokenStream lowerCasing = new LowerCaseFilter(source1); - i = 0; - for (Token nextToken = lowerCasing.next(reusableToken); nextToken != null; nextToken = lowerCasing.next(reusableToken)) { - assertTrue(nextToken.term() + " is not equal to " + tokens1[i].toLowerCase(), nextToken.term().equals(tokens1[i].toLowerCase()) == true); - i++; - } - assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length); - } - - /** - * Not an explicit test, just useful to print out some info on performance - * - * @throws Exception - */ - public void performance() throws Exception { - int[] tokCount = {100, 500, 1000, 2000, 5000, 10000}; - int[] modCounts = {1, 2, 5, 10, 20, 50, 100, 200, 500}; - for (int k = 0; k < tokCount.length; k++) { - StringBuffer buffer = new StringBuffer(); - System.out.println("-----Tokens: " + tokCount[k] + "-----"); - for (int i = 0; i < tokCount[k]; i++) { - buffer.append(English.intToEnglish(i).toUpperCase()).append(' '); - } - //make sure we produce the same tokens - ModuloSinkTokenizer sink = new ModuloSinkTokenizer(tokCount[k], 100); - final Token reusableToken = new Token(); - TokenStream stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink); - while (stream.next(reusableToken) != null) { - } - stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), 100); - List tmp = new ArrayList(); - for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) { - tmp.add(nextToken.clone()); - } - List sinkList = sink.getTokens(); - assertTrue("tmp Size: " + tmp.size() + " is not: " + sinkList.size(), tmp.size() == sinkList.size()); - for (int i = 0; i < tmp.size(); i++) { - Token tfTok = (Token) tmp.get(i); - Token sinkTok = (Token) sinkList.get(i); - assertTrue(tfTok.term() + " is not equal to " + sinkTok.term() + " at token: " + i, tfTok.term().equals(sinkTok.term()) == true); - } - //simulate two fields, each being analyzed once, for 20 documents - - for (int j = 0; j < modCounts.length; j++) { - int tfPos = 0; - long start 
= System.currentTimeMillis(); - for (int i = 0; i < 20; i++) { - stream = new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))); - for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) { - tfPos += nextToken.getPositionIncrement(); - } - stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), modCounts[j]); - for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) { - tfPos += nextToken.getPositionIncrement(); - } - } - long finish = System.currentTimeMillis(); - System.out.println("ModCount: " + modCounts[j] + " Two fields took " + (finish - start) + " ms"); - int sinkPos = 0; - //simulate one field with one sink - start = System.currentTimeMillis(); - for (int i = 0; i < 20; i++) { - sink = new ModuloSinkTokenizer(tokCount[k], modCounts[j]); - stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink); - for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) { - sinkPos += nextToken.getPositionIncrement(); - } - //System.out.println("Modulo--------"); - stream = sink; - for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) { - sinkPos += nextToken.getPositionIncrement(); - } - } - finish = System.currentTimeMillis(); - System.out.println("ModCount: " + modCounts[j] + " Tee fields took " + (finish - start) + " ms"); - assertTrue(sinkPos + " does not equal: " + tfPos, sinkPos == tfPos); - - } - System.out.println("- End Tokens: " + tokCount[k] + "-----"); - } - - } - - - class ModuloTokenFilter extends TokenFilter { - - int modCount; - - ModuloTokenFilter(TokenStream input, int mc) { - super(input); - modCount = mc; - } - - int count = 0; - - //return every 100 tokens - public Token next(final Token reusableToken) throws IOException { - Token nextToken = null; - for (nextToken = input.next(reusableToken); - nextToken != null && count % modCount != 0; - nextToken = input.next(reusableToken)) { - count++; - } - count++; - return nextToken; - } - } - - class ModuloSinkTokenizer extends SinkTokenizer { - int count = 0; - int modCount; - - - ModuloSinkTokenizer(int numToks, int mc) { - modCount = mc; - lst = new ArrayList(numToks % mc); - } - - public void add(Token t) { - if (t != null && count % modCount == 0) { - super.add(t); - } - count++; - } - } -} - diff --git a/src/test/org/apache/lucene/analysis/TestTokenStreamBWComp.java b/src/test/org/apache/lucene/analysis/TestTokenStreamBWComp.java deleted file mode 100644 index 67d13d9b562..00000000000 --- a/src/test/org/apache/lucene/analysis/TestTokenStreamBWComp.java +++ /dev/null @@ -1,393 +0,0 @@ -package org.apache.lucene.analysis; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.IOException; -import java.io.StringReader; - -import org.apache.lucene.index.Payload; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.tokenattributes.*; - -/** This class tests some special cases of backwards compatibility when using the new TokenStream API with old analyzers */ -public class TestTokenStreamBWComp extends LuceneTestCase { - - private static final String doc = "This is the new TokenStream api"; - private static final String[] stopwords = new String[] {"is", "the", "this"}; - private static final String[] results = new String[] {"new", "tokenstream", "api"}; - - public static class POSToken extends Token { - public static final int PROPERNOUN = 1; - public static final int NO_NOUN = 2; - - private int partOfSpeech; - - public void setPartOfSpeech(int pos) { - partOfSpeech = pos; - } - - public int getPartOfSpeech() { - return this.partOfSpeech; - } - } - - static class PartOfSpeechTaggingFilter extends TokenFilter { - - protected PartOfSpeechTaggingFilter(TokenStream input) { - super(input); - } - - public Token next() throws IOException { - Token t = input.next(); - if (t == null) return null; - - POSToken pt = new POSToken(); - pt.reinit(t); - if (pt.termLength() > 0) { - if (Character.isUpperCase(pt.termBuffer()[0])) { - pt.setPartOfSpeech(POSToken.PROPERNOUN); - } else { - pt.setPartOfSpeech(POSToken.NO_NOUN); - } - } - return pt; - } - - } - - static class PartOfSpeechAnnotatingFilter extends TokenFilter { - public final static byte PROPER_NOUN_ANNOTATION = 1; - - - protected PartOfSpeechAnnotatingFilter(TokenStream input) { - super(input); - } - - public Token next() throws IOException { - Token t = input.next(); - if (t == null) return null; - - if (t instanceof POSToken) { - POSToken pt = (POSToken) t; - if (pt.getPartOfSpeech() == POSToken.PROPERNOUN) { - pt.setPayload(new Payload(new byte[] {PROPER_NOUN_ANNOTATION})); - } - return pt; - } else { - return t; - } - } - - } - - // test the chain: The one and only term "TokenStream" should be declared as proper noun: - - public void testTeeSinkCustomTokenNewAPI() throws IOException { - testTeeSinkCustomToken(0); - } - - public void testTeeSinkCustomTokenOldAPI() throws IOException { - testTeeSinkCustomToken(1); - } - - public void testTeeSinkCustomTokenVeryOldAPI() throws IOException { - testTeeSinkCustomToken(2); - } - - private void testTeeSinkCustomToken(int api) throws IOException { - TokenStream stream = new WhitespaceTokenizer(new StringReader(doc)); - stream = new PartOfSpeechTaggingFilter(stream); - stream = new LowerCaseFilter(stream); - stream = new StopFilter(stream, stopwords); - - SinkTokenizer sink = new SinkTokenizer(); - TokenStream stream1 = new PartOfSpeechAnnotatingFilter(sink); - - stream = new TeeTokenFilter(stream, sink); - stream = new PartOfSpeechAnnotatingFilter(stream); - - switch (api) { - case 0: - consumeStreamNewAPI(stream); - consumeStreamNewAPI(stream1); - break; - case 1: - consumeStreamOldAPI(stream); - consumeStreamOldAPI(stream1); - break; - case 2: - consumeStreamVeryOldAPI(stream); - 
consumeStreamVeryOldAPI(stream1); - break; - } - } - - // test caching the special custom POSToken works in all cases - - public void testCachingCustomTokenNewAPI() throws IOException { - testTeeSinkCustomToken(0); - } - - public void testCachingCustomTokenOldAPI() throws IOException { - testTeeSinkCustomToken(1); - } - - public void testCachingCustomTokenVeryOldAPI() throws IOException { - testTeeSinkCustomToken(2); - } - - public void testCachingCustomTokenMixed() throws IOException { - testTeeSinkCustomToken(3); - } - - private void testCachingCustomToken(int api) throws IOException { - TokenStream stream = new WhitespaceTokenizer(new StringReader(doc)); - stream = new PartOfSpeechTaggingFilter(stream); - stream = new LowerCaseFilter(stream); - stream = new StopFilter(stream, stopwords); - stream = new CachingTokenFilter(stream); // <- the caching is done before the annotating! - stream = new PartOfSpeechAnnotatingFilter(stream); - - switch (api) { - case 0: - consumeStreamNewAPI(stream); - consumeStreamNewAPI(stream); - break; - case 1: - consumeStreamOldAPI(stream); - consumeStreamOldAPI(stream); - break; - case 2: - consumeStreamVeryOldAPI(stream); - consumeStreamVeryOldAPI(stream); - break; - case 3: - consumeStreamNewAPI(stream); - consumeStreamOldAPI(stream); - consumeStreamVeryOldAPI(stream); - consumeStreamNewAPI(stream); - consumeStreamVeryOldAPI(stream); - break; - } - } - - private static void consumeStreamNewAPI(TokenStream stream) throws IOException { - stream.reset(); - PayloadAttribute payloadAtt = (PayloadAttribute) stream.addAttribute(PayloadAttribute.class); - TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class); - - int i=0; - while (stream.incrementToken()) { - String term = termAtt.term(); - Payload p = payloadAtt.getPayload(); - if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) { - assertEquals("only TokenStream is a proper noun", "tokenstream", term); - } else { - assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term)); - } - assertEquals(results[i], term); - i++; - } - } - - private static void consumeStreamOldAPI(TokenStream stream) throws IOException { - stream.reset(); - Token reusableToken = new Token(); - - int i=0; - while ((reusableToken = stream.next(reusableToken)) != null) { - String term = reusableToken.term(); - Payload p = reusableToken.getPayload(); - if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) { - assertEquals("only TokenStream is a proper noun", "tokenstream", term); - } else { - assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term)); - } - assertEquals(results[i], term); - i++; - } - } - - private static void consumeStreamVeryOldAPI(TokenStream stream) throws IOException { - stream.reset(); - - Token token; - int i=0; - while ((token = stream.next()) != null) { - String term = token.term(); - Payload p = token.getPayload(); - if (p != null && p.getData().length == 1 && p.getData()[0] == PartOfSpeechAnnotatingFilter.PROPER_NOUN_ANNOTATION) { - assertEquals("only TokenStream is a proper noun", "tokenstream", term); - } else { - assertFalse("all other tokens (if this test fails, the special POSToken subclass is not correctly passed through the chain)", "tokenstream".equals(term)); - } - 
assertEquals(results[i], term); - i++; - } - } - - // test if tokenization fails, if only the new API is allowed and an old TokenStream is in the chain - public void testOnlyNewAPI() throws IOException { - TokenStream.setOnlyUseNewAPI(true); - try { - - // this should fail with UOE - try { - TokenStream stream = new WhitespaceTokenizer(new StringReader(doc)); - stream = new PartOfSpeechTaggingFilter(stream); // <-- this one is evil! - stream = new LowerCaseFilter(stream); - stream = new StopFilter(stream, stopwords); - while (stream.incrementToken()); - fail("If only the new API is allowed, this should fail with an UOE"); - } catch (UnsupportedOperationException uoe) { - assertEquals((PartOfSpeechTaggingFilter.class.getName()+" does not implement incrementToken() which is needed for onlyUseNewAPI."),uoe.getMessage()); - } - - // this should pass, as all core token streams support the new API - TokenStream stream = new WhitespaceTokenizer(new StringReader(doc)); - stream = new LowerCaseFilter(stream); - stream = new StopFilter(stream, stopwords); - while (stream.incrementToken()); - - // Test, if all attributes are implemented by their implementation, not Token/TokenWrapper - assertTrue("TermAttribute is not implemented by TermAttributeImpl", - stream.addAttribute(TermAttribute.class) instanceof TermAttributeImpl); - assertTrue("OffsetAttribute is not implemented by OffsetAttributeImpl", - stream.addAttribute(OffsetAttribute.class) instanceof OffsetAttributeImpl); - assertTrue("FlagsAttribute is not implemented by FlagsAttributeImpl", - stream.addAttribute(FlagsAttribute.class) instanceof FlagsAttributeImpl); - assertTrue("PayloadAttribute is not implemented by PayloadAttributeImpl", - stream.addAttribute(PayloadAttribute.class) instanceof PayloadAttributeImpl); - assertTrue("PositionIncrementAttribute is not implemented by PositionIncrementAttributeImpl", - stream.addAttribute(PositionIncrementAttribute.class) instanceof PositionIncrementAttributeImpl); - assertTrue("TypeAttribute is not implemented by TypeAttributeImpl", - stream.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl); - - // try to call old API, this should fail - try { - stream.reset(); - Token reusableToken = new Token(); - while ((reusableToken = stream.next(reusableToken)) != null); - fail("If only the new API is allowed, this should fail with an UOE"); - } catch (UnsupportedOperationException uoe) { - assertEquals("This TokenStream only supports the new Attributes API.", uoe.getMessage()); - } - try { - stream.reset(); - while (stream.next() != null); - fail("If only the new API is allowed, this should fail with an UOE"); - } catch (UnsupportedOperationException uoe) { - assertEquals("This TokenStream only supports the new Attributes API.", uoe.getMessage()); - } - - // Test if the wrapper API (onlyUseNewAPI==false) uses TokenWrapper - // as attribute instance. - // TokenWrapper encapsulates a Token instance that can be exchanged - // by another Token instance without changing the AttributeImpl instance - // itsself. 
- TokenStream.setOnlyUseNewAPI(false); - stream = new WhitespaceTokenizer(new StringReader(doc)); - assertTrue("TermAttribute is not implemented by TokenWrapper", - stream.addAttribute(TermAttribute.class) instanceof TokenWrapper); - assertTrue("OffsetAttribute is not implemented by TokenWrapper", - stream.addAttribute(OffsetAttribute.class) instanceof TokenWrapper); - assertTrue("FlagsAttribute is not implemented by TokenWrapper", - stream.addAttribute(FlagsAttribute.class) instanceof TokenWrapper); - assertTrue("PayloadAttribute is not implemented by TokenWrapper", - stream.addAttribute(PayloadAttribute.class) instanceof TokenWrapper); - assertTrue("PositionIncrementAttribute is not implemented by TokenWrapper", - stream.addAttribute(PositionIncrementAttribute.class) instanceof TokenWrapper); - assertTrue("TypeAttribute is not implemented by TokenWrapper", - stream.addAttribute(TypeAttribute.class) instanceof TokenWrapper); - - } finally { - TokenStream.setOnlyUseNewAPI(false); - } - } - - public void testOverridesAny() throws Exception { - try { - TokenStream stream = new WhitespaceTokenizer(new StringReader(doc)); - stream = new TokenFilter(stream) { - // we implement nothing, only un-abstract it - }; - stream = new LowerCaseFilter(stream); - stream = new StopFilter(stream, stopwords); - while (stream.incrementToken()); - fail("One TokenFilter does not override any of the required methods, so it should fail."); - } catch (UnsupportedOperationException uoe) { - assertTrue("invalid UOE message", uoe.getMessage().endsWith("does not implement any of incrementToken(), next(Token), next().")); - } - } - - public void testMixedOldApiConsumer() throws Exception { - // WhitespaceTokenizer is using incrementToken() API: - TokenStream stream = new WhitespaceTokenizer(new StringReader("foo bar moo maeh")); - - Token foo = new Token(); - foo = stream.next(foo); - Token bar = stream.next(); - assertEquals("foo", foo.term()); - assertEquals("bar", bar.term()); - - Token moo = stream.next(foo); - assertEquals("moo", moo.term()); - assertEquals("private 'bar' term should still be valid", "bar", bar.term()); - - // and now we also use incrementToken()... 
(very bad, but should work) - TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class); - assertTrue(stream.incrementToken()); - assertEquals("maeh", termAtt.term()); - assertEquals("private 'bar' term should still be valid", "bar", bar.term()); - } - - /* - * old api that cycles thru foo, bar, meh - */ - private class RoundRobinOldAPI extends TokenStream { - int count = 0; - String terms[] = { "foo", "bar", "meh" }; - - public Token next(Token reusableToken) throws IOException { - reusableToken.setTermBuffer(terms[count % terms.length]); - count++; - return reusableToken; - } - } - - public void testMixedOldApiConsumer2() throws Exception { - // RoundRobinOldAPI is using TokenStream(next) - TokenStream stream = new RoundRobinOldAPI(); - TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class); - - assertTrue(stream.incrementToken()); - Token bar = stream.next(); - assertEquals("foo", termAtt.term()); - assertEquals("bar", bar.term()); - - assertTrue(stream.incrementToken()); - assertEquals("meh", termAtt.term()); - assertEquals("private 'bar' term should still be valid", "bar", bar.term()); - - Token foo = stream.next(); - assertEquals("the term attribute should still be the same", "meh", termAtt.term()); - assertEquals("foo", foo.term()); - assertEquals("private 'bar' term should still be valid", "bar", bar.term()); - } - -} diff --git a/src/test/org/apache/lucene/document/TestBinaryDocument.java b/src/test/org/apache/lucene/document/TestBinaryDocument.java index 1450d2ffbce..e7dfc0fc986 100644 --- a/src/test/org/apache/lucene/document/TestBinaryDocument.java +++ b/src/test/org/apache/lucene/document/TestBinaryDocument.java @@ -71,7 +71,7 @@ public class TestBinaryDocument extends LuceneTestCase writer.close(); /** open a reader and fetch the document */ - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); Document docFromReader = reader.document(0); assertTrue(docFromReader != null); @@ -117,7 +117,7 @@ public class TestBinaryDocument extends LuceneTestCase writer.close(); /** open a reader and fetch the document */ - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); Document docFromReader = reader.document(0); assertTrue(docFromReader != null); diff --git a/src/test/org/apache/lucene/document/TestDateTools.java b/src/test/org/apache/lucene/document/TestDateTools.java deleted file mode 100644 index b64ce1166bc..00000000000 --- a/src/test/org/apache/lucene/document/TestDateTools.java +++ /dev/null @@ -1,198 +0,0 @@ -package org.apache.lucene.document; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.TimeZone; -import java.util.Locale; - -import org.apache.lucene.util.LocalizedTestCase; -import org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public class TestDateTools extends LocalizedTestCase { - - public void testStringToDate() throws ParseException { - - Date d = null; - d = DateTools.stringToDate("2004"); - assertEquals("2004-01-01 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705"); - assertEquals("2004-07-05 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("200407050910"); - assertEquals("2004-07-05 09:10:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705091055990"); - assertEquals("2004-07-05 09:10:55:990", isoFormat(d)); - - try { - d = DateTools.stringToDate("97"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("200401011235009999"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("aaaa"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - - } - - public void testStringtoTime() throws ParseException { - long time = DateTools.stringToTime("197001010000"); - Calendar cal = new GregorianCalendar(); - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - assertEquals(cal.getTime().getTime(), time); - cal.set(1980, 1, 2, // year=1980, month=february, day=2 - 11, 5, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - time = DateTools.stringToTime("198002021105"); - assertEquals(cal.getTime().getTime(), time); - } - - public void testDateAndTimetoString() throws ParseException { - Calendar cal = new GregorianCalendar(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - - String dateString; - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004", dateString); - assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH); - assertEquals("200402", dateString); - assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY); - assertEquals("20040203", dateString); - assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("2004020322", dateString); - assertEquals("2004-02-03 22:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE); - assertEquals("200402032208", dateString); - assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND); - assertEquals("20040203220856", dateString); - assertEquals("2004-02-03 22:08:56:000", 
isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("20040203220856333", dateString); - assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString))); - - // date before 1970: - cal.set(1961, 2, 5, // year=1961, month=march(!), day=5 - 23, 9, 51); // hour, minute, second - cal.set(Calendar.MILLISECOND, 444); - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("19610305230951444", dateString); - assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("1961030523", dateString); - assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - // timeToString: - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101000000000", dateString); - - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 1, 2, 3); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101010203000", dateString); - } - - public void testRound() { - Calendar cal = new GregorianCalendar(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - Date date = cal.getTime(); - assertEquals("2004-02-03 22:08:56:333", isoFormat(date)); - - Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear)); - - Date dateMonth = DateTools.round(date, DateTools.Resolution.MONTH); - assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth)); - - Date dateDay = DateTools.round(date, DateTools.Resolution.DAY); - assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay)); - - Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR); - assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour)); - - Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE); - assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute)); - - Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND); - assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond)); - - Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond)); - - // long parameter: - long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong))); - - long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(new Date(dateMillisecondLong))); - } - - private String isoFormat(Date date) { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US); - sdf.setTimeZone(TimeZone.getTimeZone("GMT")); - return sdf.format(date); - } - - public void testDateToolsUTC() throws Exception { - // Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London - long time = 1130630400; - try { - 
TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London")); - String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE); - String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE); - assertFalse("different times", d1.equals(d2)); - assertEquals("midnight", DateTools.stringToTime(d1), time*1000); - assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000); - } finally { - TimeZone.setDefault(null); - } - } - -} diff --git a/src/test/org/apache/lucene/document/TestDocument.java b/src/test/org/apache/lucene/document/TestDocument.java index 406c914024a..0de18c3530d 100644 --- a/src/test/org/apache/lucene/document/TestDocument.java +++ b/src/test/org/apache/lucene/document/TestDocument.java @@ -161,7 +161,7 @@ public class TestDocument extends LuceneTestCase writer.addDocument(makeDocumentWithFields()); writer.close(); - Searcher searcher = new IndexSearcher(dir); + Searcher searcher = new IndexSearcher(dir, true); // search for something that does exists Query query = new TermQuery(new Term("keyword", "test1")); @@ -236,7 +236,7 @@ public class TestDocument extends LuceneTestCase writer.addDocument(doc); writer.close(); - Searcher searcher = new IndexSearcher(dir); + Searcher searcher = new IndexSearcher(dir, true); Query query = new TermQuery(new Term("keyword", "test")); diff --git a/src/test/org/apache/lucene/document/TestNumberTools.java b/src/test/org/apache/lucene/document/TestNumberTools.java deleted file mode 100644 index e457b338617..00000000000 --- a/src/test/org/apache/lucene/document/TestNumberTools.java +++ /dev/null @@ -1,82 +0,0 @@ -package org.apache.lucene.document; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import org.apache.lucene.util.LuceneTestCase; - -public class TestNumberTools extends LuceneTestCase { - public void testNearZero() { - for (int i = -100; i <= 100; i++) { - for (int j = -100; j <= 100; j++) { - subtestTwoLongs(i, j); - } - } - } - - public void testMax() { - // make sure the constants convert to their equivalents - assertEquals(Long.MAX_VALUE, NumberTools - .stringToLong(NumberTools.MAX_STRING_VALUE)); - assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools - .longToString(Long.MAX_VALUE)); - - // test near MAX, too - for (long l = Long.MAX_VALUE; l > Long.MAX_VALUE - 10000; l--) { - subtestTwoLongs(l, l - 1); - } - } - - public void testMin() { - // make sure the constants convert to their equivalents - assertEquals(Long.MIN_VALUE, NumberTools - .stringToLong(NumberTools.MIN_STRING_VALUE)); - assertEquals(NumberTools.MIN_STRING_VALUE, NumberTools - .longToString(Long.MIN_VALUE)); - - // test near MIN, too - for (long l = Long.MIN_VALUE; l < Long.MIN_VALUE + 10000; l++) { - subtestTwoLongs(l, l + 1); - } - } - - private static void subtestTwoLongs(long i, long j) { - // convert to strings - String a = NumberTools.longToString(i); - String b = NumberTools.longToString(j); - - // are they the right length? - assertEquals(NumberTools.STR_SIZE, a.length()); - assertEquals(NumberTools.STR_SIZE, b.length()); - - // are they the right order? - if (i < j) { - assertTrue(a.compareTo(b) < 0); - } else if (i > j) { - assertTrue(a.compareTo(b) > 0); - } else { - assertEquals(a, b); - } - - // can we convert them back to longs? - long i2 = NumberTools.stringToLong(a); - long j2 = NumberTools.stringToLong(b); - - assertEquals(i, i2); - assertEquals(j, j2); - } -} \ No newline at end of file diff --git a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java index adb4e26870a..ab9601c8b7e 100755 --- a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java +++ b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java @@ -358,7 +358,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { setUpDirs(dir, aux); - IndexReader reader = IndexReader.open(aux); + IndexReader reader = IndexReader.open(aux, false); for (int i = 0; i < 20; i++) { reader.deleteDocument(i); } @@ -396,14 +396,14 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { assertEquals(3, writer.getSegmentCount()); writer.close(); - IndexReader reader = IndexReader.open(aux); + IndexReader reader = IndexReader.open(aux, false); for (int i = 0; i < 27; i++) { reader.deleteDocument(i); } assertEquals(3, reader.numDocs()); reader.close(); - reader = IndexReader.open(aux2); + reader = IndexReader.open(aux2, false); for (int i = 0; i < 8; i++) { reader.deleteDocument(i); } @@ -449,7 +449,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { } private void verifyNumDocs(Directory dir, int numDocs) throws IOException { - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); assertEquals(numDocs, reader.maxDoc()); assertEquals(numDocs, reader.numDocs()); reader.close(); @@ -457,7 +457,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException { - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); TermDocs termDocs = reader.termDocs(term); int count = 0; while (termDocs.next()) diff --git
a/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/src/test/org/apache/lucene/index/TestAtomicUpdate.java index f23289a66d6..9abe513e586 100644 --- a/src/test/org/apache/lucene/index/TestAtomicUpdate.java +++ b/src/test/org/apache/lucene/index/TestAtomicUpdate.java @@ -111,7 +111,7 @@ public class TestAtomicUpdate extends LuceneTestCase { } public void doWork() throws Throwable { - IndexReader r = IndexReader.open(directory); + IndexReader r = IndexReader.open(directory, true); assertEquals(100, r.numDocs()); r.close(); } @@ -138,7 +138,7 @@ public class TestAtomicUpdate extends LuceneTestCase { } writer.commit(); - IndexReader r = IndexReader.open(directory); + IndexReader r = IndexReader.open(directory, true); assertEquals(100, r.numDocs()); r.close(); diff --git a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java deleted file mode 100644 index 77b868c3911..00000000000 --- a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ /dev/null @@ -1,543 +0,0 @@ -package org.apache.lucene.index; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.BufferedOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Arrays; -import java.util.Enumeration; -import java.util.List; -import java.util.zip.ZipEntry; -import java.util.zip.ZipFile; - -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; - -/* - Verify we can read the pre-2.1 file format, do searches - against it, and add documents to it. -*/ - -public class TestBackwardsCompatibility extends LuceneTestCase -{ - - // Uncomment these cases & run them on an older Lucene - // version, to generate an index to test backwards - // compatibility. Then, cd to build/test/index.cfs and - // run "zip index..cfs.zip *"; cd to - // build/test/index.nocfs and run "zip - // index..nocfs.zip *". Then move those 2 zip - // files to your trunk checkout and add them to the - // oldNames array. 
- - /* - public void testCreatePreLocklessCFS() throws IOException { - createIndex("index.cfs", true); - } - - public void testCreatePreLocklessNoCFS() throws IOException { - createIndex("index.nocfs", false); - } - */ - - /* Unzips dirName + ".zip" --> dirName, removing dirName - first */ - public void unzip(String zipName, String destDirName) throws IOException { - - Enumeration entries; - ZipFile zipFile; - zipFile = new ZipFile(zipName + ".zip"); - - entries = zipFile.entries(); - - String dirName = fullDir(destDirName); - - File fileDir = new File(dirName); - rmDir(destDirName); - - fileDir.mkdir(); - - while (entries.hasMoreElements()) { - ZipEntry entry = (ZipEntry) entries.nextElement(); - - InputStream in = zipFile.getInputStream(entry); - OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName()))); - - byte[] buffer = new byte[8192]; - int len; - while((len = in.read(buffer)) >= 0) { - out.write(buffer, 0, len); - } - - in.close(); - out.close(); - } - - zipFile.close(); - } - - public void testCreateCFS() throws IOException { - String dirName = "testindex.cfs"; - createIndex(dirName, true); - rmDir(dirName); - } - - public void testCreateNoCFS() throws IOException { - String dirName = "testindex.nocfs"; - createIndex(dirName, true); - rmDir(dirName); - } - - final String[] oldNames = {"19.cfs", - "19.nocfs", - "20.cfs", - "20.nocfs", - "21.cfs", - "21.nocfs", - "22.cfs", - "22.nocfs", - "23.cfs", - "23.nocfs", - "24.cfs", - "24.nocfs", - }; - - public void testOptimizeOldIndex() throws IOException { - for(int i=0;i= 2.3 - hits = searcher.search(new TermQuery(new Term("utf8", "\u0000")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - hits = searcher.search(new TermQuery(new Term("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - hits = searcher.search(new TermQuery(new Term("utf8", "ab\ud917\udc17cd")), null, 1000).scoreDocs; - assertEquals(34, hits.length); - } - - searcher.close(); - dir.close(); - } - - private int compare(String name, String v) { - int v0 = Integer.parseInt(name.substring(0, 2)); - int v1 = Integer.parseInt(v); - return v0 - v1; - } - - /* Open pre-lockless index, add docs, do a delete & - * setNorm, and search */ - public void changeIndexWithAdds(String dirName, boolean autoCommit) throws IOException { - String origDirName = dirName; - dirName = fullDir(dirName); - - Directory dir = FSDirectory.open(new File(dirName)); - - // open writer - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); - - // add 10 docs - for(int i=0;i<10;i++) { - addDoc(writer, 35+i); - } - - // make sure writer sees right total -- writer seems not to know about deletes in .del? 
- final int expected; - if (compare(origDirName, "24") < 0) { - expected = 45; - } else { - expected = 46; - } - assertEquals("wrong doc count", expected, writer.docCount()); - writer.close(); - - // make sure searching sees right # hits - IndexSearcher searcher = new IndexSearcher(dir); - ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - Document d = searcher.doc(hits[0].doc); - assertEquals("wrong first document", "21", d.get("id")); - testHits(hits, 44, searcher.getIndexReader()); - searcher.close(); - - // make sure we can do delete & setNorm against this - // pre-lockless segment: - IndexReader reader = IndexReader.open(dir); - Term searchTerm = new Term("id", "6"); - int delCount = reader.deleteDocuments(searchTerm); - assertEquals("wrong delete count", 1, delCount); - reader.setNorm(22, "content", (float) 2.0); - reader.close(); - - // make sure they "took": - searcher = new IndexSearcher(dir); - hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); - assertEquals("wrong first document", "22", d.get("id")); - testHits(hits, 43, searcher.getIndexReader()); - searcher.close(); - - // optimize - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); - writer.optimize(); - writer.close(); - - searcher = new IndexSearcher(dir); - hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); - testHits(hits, 43, searcher.getIndexReader()); - assertEquals("wrong first document", "22", d.get("id")); - searcher.close(); - - dir.close(); - } - - /* Open pre-lockless index, add docs, do a delete & - * setNorm, and search */ - public void changeIndexNoAdds(String dirName, boolean autoCommit) throws IOException { - - dirName = fullDir(dirName); - - Directory dir = FSDirectory.open(new File(dirName)); - - // make sure searching sees right # hits - IndexSearcher searcher = new IndexSearcher(dir); - ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - assertEquals("wrong number of hits", 34, hits.length); - Document d = searcher.doc(hits[0].doc); - assertEquals("wrong first document", "21", d.get("id")); - searcher.close(); - - // make sure we can do a delete & setNorm against this - // pre-lockless segment: - IndexReader reader = IndexReader.open(dir); - Term searchTerm = new Term("id", "6"); - int delCount = reader.deleteDocuments(searchTerm); - assertEquals("wrong delete count", 1, delCount); - reader.setNorm(22, "content", (float) 2.0); - reader.close(); - - // make sure they "took": - searcher = new IndexSearcher(dir); - hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); - assertEquals("wrong first document", "22", d.get("id")); - testHits(hits, 33, searcher.getIndexReader()); - searcher.close(); - - // optimize - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); - writer.optimize(); - writer.close(); - - searcher = new IndexSearcher(dir); - hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); - assertEquals("wrong first document", "22", 
d.get("id")); - testHits(hits, 33, searcher.getIndexReader()); - searcher.close(); - - dir.close(); - } - - public void createIndex(String dirName, boolean doCFS) throws IOException { - - rmDir(dirName); - - dirName = fullDir(dirName); - - Directory dir = FSDirectory.open(new File(dirName)); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(doCFS); - writer.setMaxBufferedDocs(10); - - for(int i=0;i<35;i++) { - addDoc(writer, i); - } - assertEquals("wrong doc count", 35, writer.docCount()); - writer.close(); - - // open fresh writer so we get no prx file in the added segment - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(doCFS); - writer.setMaxBufferedDocs(10); - addNoProxDoc(writer); - writer.close(); - - // Delete one doc so we get a .del file: - IndexReader reader = IndexReader.open(dir); - Term searchTerm = new Term("id", "7"); - int delCount = reader.deleteDocuments(searchTerm); - assertEquals("didn't delete the right number of documents", 1, delCount); - - // Set one norm so we get a .s0 file: - reader.setNorm(21, "content", (float) 1.5); - reader.close(); - } - - /* Verifies that the expected file names were produced */ - - public void testExactFileNames() throws IOException { - - for(int pass=0;pass<2;pass++) { - - String outputDir = "lucene.backwardscompat0.index"; - rmDir(outputDir); - - try { - Directory dir = FSDirectory.open(new File(fullDir(outputDir))); - - boolean autoCommit = 0 == pass; - - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); - writer.setRAMBufferSizeMB(16.0); - for(int i=0;i<35;i++) { - addDoc(writer, i); - } - assertEquals("wrong doc count", 35, writer.docCount()); - writer.close(); - - // Delete one doc so we get a .del file: - IndexReader reader = IndexReader.open(dir); - Term searchTerm = new Term("id", "7"); - int delCount = reader.deleteDocuments(searchTerm); - assertEquals("didn't delete the right number of documents", 1, delCount); - - // Set one norm so we get a .s0 file: - reader.setNorm(21, "content", (float) 1.5); - reader.close(); - - // The numbering of fields can vary depending on which - // JRE is in use. On some JREs we see content bound to - // field 0; on others, field 1. 
So, here we have to - // figure out which field number corresponds to - // "content", and then set our expected file names below - // accordingly: - CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs"); - FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm"); - int contentFieldIndex = -1; - for(int i=0;i 0) { - s += "\n "; - } - s += l[i]; - } - return s; - } - - private void addDoc(IndexWriter writer, int id) throws IOException - { - Document doc = new Document(); - doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - writer.addDocument(doc); - } - - private void addNoProxDoc(IndexWriter writer) throws IOException { - Document doc = new Document(); - Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED); - f.setOmitTf(true); - doc.add(f); - f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO); - f.setOmitTf(true); - doc.add(f); - writer.addDocument(doc); - } - - private void rmDir(String dir) throws IOException { - File fileDir = new File(fullDir(dir)); - if (fileDir.exists()) { - File[] files = fileDir.listFiles(); - if (files != null) { - for (int i = 0; i < files.length; i++) { - files[i].delete(); - } - } - fileDir.delete(); - } - } - - public static String fullDir(String dirName) throws IOException { - return new File(System.getProperty("tempDir"), dirName).getCanonicalPath(); - } -} diff --git a/src/test/org/apache/lucene/index/TestCheckIndex.java b/src/test/org/apache/lucene/index/TestCheckIndex.java index 78ec8b0b43b..6b673bb6817 100644 --- a/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -43,7 +43,7 @@ public class TestCheckIndex extends LuceneTestCase { writer.addDocument(doc); } writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); reader.deleteDocument(5); reader.close(); diff --git a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java index 2d07c3cc34b..5b3005c9baf 100644 --- a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java +++ b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java @@ -88,7 +88,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { } writer.close(); - IndexReader reader = IndexReader.open(directory); + IndexReader reader = IndexReader.open(directory, true); assertEquals(200, reader.numDocs()); reader.close(); directory.close(); @@ -131,7 +131,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { } writer.close(); - IndexReader reader = IndexReader.open(directory); + IndexReader reader = IndexReader.open(directory, true); // Verify that we did not lose any deletes... 
assertEquals(450, reader.numDocs()); reader.close(); @@ -207,7 +207,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { writer.close(false); - IndexReader reader = IndexReader.open(directory); + IndexReader reader = IndexReader.open(directory, true); assertEquals((1+iter)*182, reader.numDocs()); reader.close(); diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java index f7b5710ca14..9a56729370f 100644 --- a/src/test/org/apache/lucene/index/TestCrash.java +++ b/src/test/org/apache/lucene/index/TestCrash.java @@ -61,7 +61,7 @@ public class TestCrash extends LuceneTestCase { IndexWriter writer = initIndex(); MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory(); crash(writer); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() < 157); } @@ -73,7 +73,7 @@ public class TestCrash extends LuceneTestCase { writer = initIndex(dir); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() < 314); } @@ -94,7 +94,7 @@ public class TestCrash extends LuceneTestCase { dir.fileLength(l[i]) + " bytes"); */ - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); assertTrue(reader.numDocs() >= 157); } @@ -113,7 +113,7 @@ public class TestCrash extends LuceneTestCase { System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes"); */ - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); assertEquals(157, reader.numDocs()); } @@ -132,7 +132,7 @@ public class TestCrash extends LuceneTestCase { for(int i=0;i 0) { - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); reader.close(); dir.deleteFile(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen)); gen--; @@ -392,7 +392,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Should undo our rollback: writer.rollback(); - IndexReader r = IndexReader.open(dir); + IndexReader r = IndexReader.open(dir, true); // Still optimized, still 11 docs assertTrue(r.isOptimized()); assertEquals(11, r.numDocs()); @@ -406,7 +406,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Now 8 because we made another commit assertEquals(8, IndexReader.listCommits(dir).size()); - r = IndexReader.open(dir); + r = IndexReader.open(dir, true); // Not optimized because we rolled it back, and now only // 10 docs assertTrue(!r.isOptimized()); @@ -418,7 +418,7 @@ public class TestDeletionPolicy extends LuceneTestCase writer.optimize(); writer.close(); - r = IndexReader.open(dir); + r = IndexReader.open(dir, true); assertTrue(r.isOptimized()); assertEquals(10, r.numDocs()); r.close(); @@ -430,7 +430,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Reader still sees optimized index, because writer // opened on the prior commit has not yet committed: - r = IndexReader.open(dir); + r = IndexReader.open(dir, true); assertTrue(r.isOptimized()); assertEquals(10, r.numDocs()); r.close(); @@ -438,7 +438,7 @@ public class TestDeletionPolicy extends LuceneTestCase writer.close(); // Now reader sees unoptimized index: - r = IndexReader.open(dir); + r = IndexReader.open(dir, true); assertTrue(!r.isOptimized()); assertEquals(10, r.numDocs()); r.close(); @@ -483,7 +483,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Simplistic check: just 
verify the index is in fact // readable: - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); reader.close(); dir.close(); @@ -531,7 +531,7 @@ public class TestDeletionPolicy extends LuceneTestCase long gen = SegmentInfos.getCurrentSegmentGeneration(dir); for(int i=0;i 1; @@ -501,7 +473,7 @@ public class TestIndexReaderClone extends LuceneTestCase { public void testLucene1516Bug() throws Exception { final Directory dir1 = new MockRAMDirectory(); TestIndexReaderReopen.createIndex(dir1, false); - IndexReader r1 = IndexReader.open(dir1); + IndexReader r1 = IndexReader.open(dir1, false); r1.incRef(); IndexReader r2 = r1.clone(false); r1.deleteDocument(5); @@ -523,7 +495,7 @@ public class TestIndexReaderClone extends LuceneTestCase { doc.add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED)); w.addDocument(doc); w.close(); - IndexReader r1 = IndexReader.open(dir); + IndexReader r1 = IndexReader.open(dir, false); IndexReader r2 = r1.clone(false); r1.close(); r2.close(); diff --git a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java index 22dc3119c21..365df9d1e1e 100644 --- a/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java +++ b/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java @@ -148,10 +148,10 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { // try cloning and reopening the norms private void doTestNorms(Directory dir) throws IOException { addDocs(dir, 12, true); - IndexReader ir = IndexReader.open(dir); + IndexReader ir = IndexReader.open(dir, false); verifyIndex(ir); modifyNormsForF1(ir); - IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir);//ir.clone(); + IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir, false);//ir.clone(); verifyIndex(irc); modifyNormsForF1(irc); @@ -183,7 +183,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { public void testNormsRefCounting() throws IOException { Directory dir1 = new MockRAMDirectory(); TestIndexReaderReopen.createIndex(dir1, false); - IndexReader reader1 = IndexReader.open(dir1); + IndexReader reader1 = IndexReader.open(dir1, false); IndexReader reader2C = (IndexReader) reader1.clone(); SegmentReader segmentReader2C = SegmentReader.getOnlySegmentReader(reader2C); @@ -243,7 +243,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { } private void modifyNormsForF1(Directory dir) throws IOException { - IndexReader ir = IndexReader.open(dir); + IndexReader ir = IndexReader.open(dir, false); modifyNormsForF1(ir); } @@ -268,7 +268,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase { } private void verifyIndex(Directory dir) throws IOException { - IndexReader ir = IndexReader.open(dir); + IndexReader ir = IndexReader.open(dir, false); verifyIndex(ir); ir.close(); } diff --git a/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/src/test/org/apache/lucene/index/TestIndexReaderReopen.java index 29d7f88e5ab..af07f6fc8cd 100644 --- a/src/test/org/apache/lucene/index/TestIndexReaderReopen.java +++ b/src/test/org/apache/lucene/index/TestIndexReaderReopen.java @@ -62,7 +62,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } protected IndexReader openReader() throws IOException { - return IndexReader.open(dir1); + return IndexReader.open(dir1, false); } }); @@ -78,7 +78,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } protected IndexReader 
openReader() throws IOException { - return IndexReader.open(dir2); + return IndexReader.open(dir2, false); } }); @@ -100,8 +100,8 @@ public class TestIndexReaderReopen extends LuceneTestCase { protected IndexReader openReader() throws IOException { ParallelReader pr = new ParallelReader(); - pr.add(IndexReader.open(dir1)); - pr.add(IndexReader.open(dir2)); + pr.add(IndexReader.open(dir1, false)); + pr.add(IndexReader.open(dir2, false)); return pr; } @@ -123,11 +123,11 @@ public class TestIndexReaderReopen extends LuceneTestCase { protected IndexReader openReader() throws IOException { ParallelReader pr = new ParallelReader(); - pr.add(IndexReader.open(dir3)); - pr.add(IndexReader.open(dir4)); + pr.add(IndexReader.open(dir3, false)); + pr.add(IndexReader.open(dir4, false)); // Does not implement reopen, so // hits exception: - pr.add(new FilterIndexReader(IndexReader.open(dir3))); + pr.add(new FilterIndexReader(IndexReader.open(dir3, false))); return pr; } @@ -164,7 +164,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { private void doTestReopenWithCommit (Directory dir, boolean withReopen) throws IOException { IndexWriter iwriter = new IndexWriter(dir, new KeywordAnalyzer(), true, MaxFieldLength.LIMITED); iwriter.setMergeScheduler(new SerialMergeScheduler()); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); try { int M = 3; for (int i=0; i<4; i++) { @@ -194,7 +194,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } else { // recreate reader.close(); - reader = IndexReader.open(dir); + reader = IndexReader.open(dir, false); } } } finally { @@ -220,8 +220,8 @@ public class TestIndexReaderReopen extends LuceneTestCase { protected IndexReader openReader() throws IOException { return new MultiReader(new IndexReader[] - {IndexReader.open(dir1), - IndexReader.open(dir2)}); + {IndexReader.open(dir1, false), + IndexReader.open(dir2, false)}); } }); @@ -244,11 +244,11 @@ public class TestIndexReaderReopen extends LuceneTestCase { protected IndexReader openReader() throws IOException { return new MultiReader(new IndexReader[] - {IndexReader.open(dir3), - IndexReader.open(dir4), + {IndexReader.open(dir3, false), + IndexReader.open(dir4, false), // Does not implement reopen, so // hits exception: - new FilterIndexReader(IndexReader.open(dir3))}); + new FilterIndexReader(IndexReader.open(dir3, false))}); } }); @@ -280,12 +280,12 @@ public class TestIndexReaderReopen extends LuceneTestCase { protected IndexReader openReader() throws IOException { ParallelReader pr = new ParallelReader(); - pr.add(IndexReader.open(dir1)); - pr.add(IndexReader.open(dir2)); + pr.add(IndexReader.open(dir1, false)); + pr.add(IndexReader.open(dir2, false)); MultiReader mr = new MultiReader(new IndexReader[] { - IndexReader.open(dir3), IndexReader.open(dir4)}); + IndexReader.open(dir3, false), IndexReader.open(dir4, false)}); return new MultiReader(new IndexReader[] { - pr, mr, IndexReader.open(dir5)}); + pr, mr, IndexReader.open(dir5, false)}); } }); dir1.close(); @@ -347,7 +347,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { Directory dir1 = new MockRAMDirectory(); createIndex(dir1, true); - IndexReader reader0 = IndexReader.open(dir1); + IndexReader reader0 = IndexReader.open(dir1, false); assertRefCountEquals(1, reader0); assertTrue(reader0 instanceof DirectoryReader); @@ -357,7 +357,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } // delete first document, so that only one of the subReaders have to be 
re-opened - IndexReader modifier = IndexReader.open(dir1); + IndexReader modifier = IndexReader.open(dir1, false); modifier.deleteDocument(0); modifier.close(); @@ -376,7 +376,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } // delete first document, so that only one of the subReaders have to be re-opened - modifier = IndexReader.open(dir1); + modifier = IndexReader.open(dir1, false); modifier.deleteDocument(1); modifier.close(); @@ -454,10 +454,10 @@ public class TestIndexReaderReopen extends LuceneTestCase { Directory dir2 = new MockRAMDirectory(); createIndex(dir2, true); - IndexReader reader1 = IndexReader.open(dir1); + IndexReader reader1 = IndexReader.open(dir1, false); assertRefCountEquals(1, reader1); - IndexReader initReader2 = IndexReader.open(dir2); + IndexReader initReader2 = IndexReader.open(dir2, false); IndexReader multiReader1 = new MultiReader(new IndexReader[] {reader1, initReader2}, (mode == 0)); modifyIndex(0, dir2); assertRefCountEquals(1 + mode, reader1); @@ -525,12 +525,12 @@ public class TestIndexReaderReopen extends LuceneTestCase { Directory dir2 = new MockRAMDirectory(); createIndex(dir2, true); - IndexReader reader1 = IndexReader.open(dir1); + IndexReader reader1 = IndexReader.open(dir1, false); assertRefCountEquals(1, reader1); ParallelReader parallelReader1 = new ParallelReader(mode == 0); parallelReader1.add(reader1); - IndexReader initReader2 = IndexReader.open(dir2); + IndexReader initReader2 = IndexReader.open(dir2, false); parallelReader1.add(initReader2); modifyIndex(1, dir2); assertRefCountEquals(1 + mode, reader1); @@ -597,26 +597,26 @@ public class TestIndexReaderReopen extends LuceneTestCase { Directory dir1 = new MockRAMDirectory(); createIndex(dir1, false); - IndexReader reader1 = IndexReader.open(dir1); + IndexReader reader1 = IndexReader.open(dir1, false); SegmentReader segmentReader1 = SegmentReader.getOnlySegmentReader(reader1); - IndexReader modifier = IndexReader.open(dir1); + IndexReader modifier = IndexReader.open(dir1, false); modifier.deleteDocument(0); modifier.close(); IndexReader reader2 = reader1.reopen(); - modifier = IndexReader.open(dir1); + modifier = IndexReader.open(dir1, false); modifier.setNorm(1, "field1", 50); modifier.setNorm(1, "field2", 50); modifier.close(); IndexReader reader3 = reader2.reopen(); SegmentReader segmentReader3 = SegmentReader.getOnlySegmentReader(reader3); - modifier = IndexReader.open(dir1); + modifier = IndexReader.open(dir1, false); modifier.deleteDocument(2); modifier.close(); IndexReader reader4 = reader3.reopen(); - modifier = IndexReader.open(dir1); + modifier = IndexReader.open(dir1, false); modifier.deleteDocument(3); modifier.close(); @@ -697,11 +697,11 @@ public class TestIndexReaderReopen extends LuceneTestCase { final TestReopen test = new TestReopen() { protected void modifyIndex(int i) throws IOException { if (i % 3 == 0) { - IndexReader modifier = IndexReader.open(dir); + IndexReader modifier = IndexReader.open(dir, false); modifier.setNorm(i, "field1", 50); modifier.close(); } else if (i % 3 == 1) { - IndexReader modifier = IndexReader.open(dir); + IndexReader modifier = IndexReader.open(dir, false); modifier.deleteDocument(i % modifier.maxDoc()); modifier.close(); } else { @@ -712,12 +712,12 @@ public class TestIndexReaderReopen extends LuceneTestCase { } protected IndexReader openReader() throws IOException { - return IndexReader.open(dir); + return IndexReader.open(dir, false); } }; final List readers = Collections.synchronizedList(new ArrayList()); - IndexReader 
firstReader = IndexReader.open(dir); + IndexReader firstReader = IndexReader.open(dir, false); IndexReader reader = firstReader; final Random rnd = newRandom(); @@ -945,7 +945,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { w.close(); - IndexReader r = IndexReader.open(dir); + IndexReader r = IndexReader.open(dir, false); if (multiSegment) { assertTrue(r.getSequentialSubReaders().length > 1); } else { @@ -980,7 +980,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { break; } case 1: { - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); reader.setNorm(4, "field1", 123); reader.setNorm(44, "field2", 222); reader.setNorm(44, "field4", 22); @@ -1003,7 +1003,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { break; } case 4: { - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, false); reader.setNorm(5, "field1", 123); reader.setNorm(55, "field2", 222); reader.close(); @@ -1081,71 +1081,11 @@ public class TestIndexReaderReopen extends LuceneTestCase { indexDir = new File(tempDir, "IndexReaderReopen"); } - // LUCENE-1453 - public void testFSDirectoryReopen() throws CorruptIndexException, IOException { - Directory dir1 = FSDirectory.getDirectory(indexDir, null); - createIndex(dir1, false); - dir1.close(); - - IndexReader ir = IndexReader.open(indexDir); - modifyIndex(3, ir.directory()); - IndexReader newIr = ir.reopen(); - modifyIndex(3, newIr.directory()); - IndexReader newIr2 = newIr.reopen(); - modifyIndex(3, newIr2.directory()); - IndexReader newIr3 = newIr2.reopen(); - - ir.close(); - newIr.close(); - newIr2.close(); - - // shouldn't throw Directory AlreadyClosedException - modifyIndex(3, newIr3.directory()); - newIr3.close(); - } - - // LUCENE-1453 - public void testFSDirectoryReopen2() throws CorruptIndexException, IOException { - - String tempDir = System.getProperty("java.io.tmpdir"); - if (tempDir == null) - throw new IOException("java.io.tmpdir undefined, cannot run test"); - File indexDir2 = new File(tempDir, "IndexReaderReopen2"); - - Directory dir1 = FSDirectory.getDirectory(indexDir2); - createIndex(dir1, false); - - IndexReader lastReader = IndexReader.open(indexDir2); - - Random r = newRandom(); - for(int i=0;i<10;i++) { - int mod = r.nextInt(5); - modifyIndex(mod, lastReader.directory()); - IndexReader reader = lastReader.reopen(); - if (reader != lastReader) { - lastReader.close(); - lastReader = reader; - } - } - lastReader.close(); - - // Make sure we didn't pick up too many incRef's along - // the way -- this close should be the final close: - dir1.close(); - - try { - dir1.listAll(); - fail("did not hit AlreadyClosedException"); - } catch (AlreadyClosedException ace) { - // expected - } - } - public void testCloseOrig() throws Throwable { Directory dir = new MockRAMDirectory(); createIndex(dir, false); - IndexReader r1 = IndexReader.open(dir); - IndexReader r2 = IndexReader.open(dir); + IndexReader r1 = IndexReader.open(dir, false); + IndexReader r2 = IndexReader.open(dir, false); r2.deleteDocument(0); r2.close(); @@ -1169,7 +1109,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { modifyIndex(0, dir); // Get delete bitVector on 1st segment modifyIndex(5, dir); // Add a doc (2 segments) - IndexReader r1 = IndexReader.open(dir); // MSR + IndexReader r1 = IndexReader.open(dir, false); // MSR modifyIndex(5, dir); // Add another doc (3 segments) @@ -1200,7 +1140,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { 
createIndex(dir, false); // Get delete bitVector modifyIndex(0, dir); - IndexReader r1 = IndexReader.open(dir); + IndexReader r1 = IndexReader.open(dir, false); // Add doc: modifyIndex(5, dir); @@ -1250,7 +1190,7 @@ public class TestIndexReaderReopen extends LuceneTestCase { } writer.close(); - IndexReader r = IndexReader.open(dir); + IndexReader r = IndexReader.open(dir, false); assertEquals(0, r.numDocs()); assertEquals(4, r.maxDoc()); diff --git a/src/test/org/apache/lucene/index/TestIndexWriter.java b/src/test/org/apache/lucene/index/TestIndexWriter.java index aa1c019e7d5..35deaf12f3a 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -104,7 +104,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { writer.close(); // delete 40 documents - reader = IndexReader.open(dir); + reader = IndexReader.open(dir, false); for (i = 0; i < 40; i++) { reader.deleteDocument(i); } @@ -115,7 +115,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { assertEquals(100, writer.docCount()); writer.close(); - reader = IndexReader.open(dir); + reader = IndexReader.open(dir, true); assertEquals(100, reader.maxDoc()); assertEquals(60, reader.numDocs()); reader.close(); @@ -130,7 +130,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { writer.close(); // check that the index reader gives the same numbers. - reader = IndexReader.open(dir); + reader = IndexReader.open(dir, true); assertEquals(60, reader.maxDoc()); assertEquals(60, reader.numDocs()); reader.close(); @@ -202,7 +202,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { // Make sure starting index seems to be working properly: Term searchTerm = new Term("content", "aaa"); - IndexReader reader = IndexReader.open(startDir); + IndexReader reader = IndexReader.open(startDir, true); assertEquals("first docFreq", 57, reader.docFreq(searchTerm)); IndexSearcher searcher = new IndexSearcher(reader); @@ -315,7 +315,7 @@ public class TestIndexWriter extends BaseTokenStreamTestCase { } else if (1 == method) { IndexReader readers[] = new IndexReader[dirs.length]; for(int i=0;i - *
ABC-123, A-1, A-7, A-100, B-99999 - * - * Such values cannot be sorted as strings, since A-100 needs - * to come after A-7. - * - * It could be argued that the "ids" should be rewritten as - * A-0001, A-0100, etc. so they will sort as strings. That is - * a valid alternate way to solve it - but - * this is only supposed to be a simple test case. - * - *
Created: Apr 21, 2004 5:34:47 PM - * - * - * @version $Id$ - * @since 1.4 - */ -public class SampleComparable -implements Comparable, Serializable { - - String string_part; - Integer int_part; - - public SampleComparable (String s) { - int i = s.indexOf ("-"); - string_part = s.substring (0, i); - int_part = new Integer (s.substring (i + 1)); - } - - public int compareTo (Object o) { - SampleComparable otherid = (SampleComparable) o; - int i = string_part.compareTo (otherid.string_part); - if (i == 0) return int_part.compareTo (otherid.int_part); - return i; - } - - public static SortComparatorSource getComparatorSource () { - return new SortComparatorSource () { - public ScoreDocComparator newComparator (final IndexReader reader, String fieldname) - throws IOException { - final String field = StringHelper.intern(fieldname); - final TermEnum enumerator = reader.terms (new Term (fieldname, "")); - try { - return new ScoreDocComparator () { - protected Comparable[] cachedValues = fillCache (reader, enumerator, field); - - public int compare (ScoreDoc i, ScoreDoc j) { - return cachedValues[i.doc].compareTo (cachedValues[j.doc]); - } - - public Comparable sortValue (ScoreDoc i) { - return cachedValues[i.doc]; - } - - public int sortType () { - return SortField.CUSTOM; - } - }; - } finally { - enumerator.close (); - } - } - - /** - * Returns an array of objects which represent that natural order - * of the term values in the given field. - * - * @param reader Terms are in this index. - * @param enumerator Use this to get the term values and TermDocs. - * @param fieldname Comparables should be for this field. - * @return Array of objects representing natural order of terms in field. - * @throws IOException If an error occurs reading the index. - */ - protected Comparable[] fillCache (IndexReader reader, TermEnum enumerator, String fieldname) - throws IOException { - final String field = StringHelper.intern(fieldname); - Comparable[] retArray = new Comparable[reader.maxDoc ()]; - if (retArray.length > 0) { - TermDocs termDocs = reader.termDocs (); - try { - if (enumerator.term () == null) { - throw new RuntimeException ("no terms in field " + field); - } - do { - Term term = enumerator.term (); - if (term.field () != field) break; - Comparable termval = getComparable (term.text ()); - termDocs.seek (enumerator); - while (termDocs.next ()) { - retArray[termDocs.doc ()] = termval; - } - } while (enumerator.next ()); - } finally { - termDocs.close (); - } - } - return retArray; - } - - Comparable getComparable (String termtext) { - return new SampleComparable (termtext); - } - }; - } - - private static final class InnerSortComparator extends SortComparator { - protected Comparable getComparable (String termtext) { - return new SampleComparable (termtext); - } - public int hashCode() { return this.getClass().getName().hashCode(); } - public boolean equals(Object that) { return this.getClass().equals(that.getClass()); } - }; - - public static SortComparator getComparator() { - return new InnerSortComparator(); - } -} diff --git a/src/test/org/apache/lucene/search/TestBoolean2.java b/src/test/org/apache/lucene/search/TestBoolean2.java index a538cdb9d31..0e02bcec394 100644 --- a/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/src/test/org/apache/lucene/search/TestBoolean2.java @@ -48,7 +48,7 @@ public class TestBoolean2 extends LuceneTestCase { writer.addDocument(doc); } writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); } private 
String[] docFields = { diff --git a/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java index af2358d3a29..e571ff5aed5 100644 --- a/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java +++ b/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java @@ -74,7 +74,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase { writer.optimize(); writer.close(); - r = IndexReader.open(index); + r = IndexReader.open(index, true); s = new IndexSearcher(r); //System.out.println("Set up " + getName()); diff --git a/src/test/org/apache/lucene/search/TestBooleanOr.java b/src/test/org/apache/lucene/search/TestBooleanOr.java index 5c14b18762f..11c63968322 100644 --- a/src/test/org/apache/lucene/search/TestBooleanOr.java +++ b/src/test/org/apache/lucene/search/TestBooleanOr.java @@ -155,6 +155,6 @@ public class TestBooleanOr extends LuceneTestCase { writer.close(); // - searcher = new IndexSearcher(rd); + searcher = new IndexSearcher(rd, true); } } diff --git a/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java b/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java index 799ca74f964..77fbe9d3c8a 100644 --- a/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java +++ b/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java @@ -89,7 +89,7 @@ public class TestBooleanPrefixQuery extends LuceneTestCase { } writer.close(); - reader = IndexReader.open(directory); + reader = IndexReader.open(directory, true); PrefixQuery query = new PrefixQuery(new Term("category", "foo")); rw1 = query.rewrite(reader); diff --git a/src/test/org/apache/lucene/search/TestBooleanScorer.java b/src/test/org/apache/lucene/search/TestBooleanScorer.java index 1ef2a133880..96ab5faa047 100644 --- a/src/test/org/apache/lucene/search/TestBooleanScorer.java +++ b/src/test/org/apache/lucene/search/TestBooleanScorer.java @@ -64,7 +64,7 @@ public class TestBooleanScorer extends LuceneTestCase query.add(booleanQuery1, BooleanClause.Occur.MUST); query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT); - IndexSearcher indexSearcher = new IndexSearcher(directory); + IndexSearcher indexSearcher = new IndexSearcher(directory, true); ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs; assertEquals("Number of matched documents", 2, hits.length); diff --git a/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java b/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index 12fc24db134..390f51b0038 100644 --- a/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ b/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -36,7 +36,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); MockFilter filter = new MockFilter(); CachingWrapperFilter cacher = new CachingWrapperFilter(filter); @@ -76,7 +76,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); // not cacheable: assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false); 
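Every hunk in this patch makes the same mechanical substitution: the IndexReader.open(Directory) and new IndexSearcher(Directory) calls, deprecated in Lucene 2.9, become the overloads that take an explicit readOnly flag, so each test now declares whether it still needs a writable reader. Below is a minimal sketch of the two modes, assuming the Lucene 2.9 API as used in the hunks above; the class name ReadOnlyFlagSketch and the sample field values are illustrative only, not part of the patch.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ReadOnlyFlagSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();

    // Build a tiny index to open readers against.
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
        true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    // readOnly=true: the reader refuses deleteDocument()/setNorm()
    // instead of lazily acquiring the index write lock.
    IndexReader readOnly = IndexReader.open(dir, true);
    System.out.println("numDocs=" + readOnly.numDocs());
    readOnly.close();

    // readOnly=false: a writable reader, as needed by the tests above
    // that delete documents or adjust norms through the reader.
    IndexReader writable = IndexReader.open(dir, false);
    writable.deleteDocument(0);
    writable.close();

    // IndexSearcher has the same overload; search-only tests pass true.
    IndexSearcher searcher = new IndexSearcher(dir, true);
    searcher.close();
  }
}

This is why the replacements are not uniform: hunks whose surrounding code calls deleteDocument() or setNorm() pass false, while pure search setups pass true.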
diff --git a/src/test/org/apache/lucene/search/TestDateFilter.java b/src/test/org/apache/lucene/search/TestDateFilter.java index a5b21a58f24..540882fea23 100644 --- a/src/test/org/apache/lucene/search/TestDateFilter.java +++ b/src/test/org/apache/lucene/search/TestDateFilter.java @@ -62,7 +62,7 @@ public class TestDateFilter writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); // filter that should preserve matches //DateFilter df1 = DateFilter.Before("datefield", now); @@ -123,7 +123,7 @@ public class TestDateFilter writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); // filter that should preserve matches //DateFilter df1 = DateFilter.After("datefield", now); diff --git a/src/test/org/apache/lucene/search/TestDateSort.java b/src/test/org/apache/lucene/search/TestDateSort.java index d7a1f2df67b..8358b16b3ef 100644 --- a/src/test/org/apache/lucene/search/TestDateSort.java +++ b/src/test/org/apache/lucene/search/TestDateSort.java @@ -70,7 +70,7 @@ public class TestDateSort extends LuceneTestCase { } public void testReverseDateSort() throws Exception { - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); // Create a Sort object. reverse is set to true. // problem occurs only with SortField.AUTO: diff --git a/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index 5b70fb952aa..a8a2c2d9654 100644 --- a/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ b/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -121,7 +121,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{ writer.close(); - r = IndexReader.open(index); + r = IndexReader.open(index, true); s = new IndexSearcher(r); s.setSimilarity(sim); } diff --git a/src/test/org/apache/lucene/search/TestDocBoost.java b/src/test/org/apache/lucene/search/TestDocBoost.java index c4bde416b1c..7855b429523 100644 --- a/src/test/org/apache/lucene/search/TestDocBoost.java +++ b/src/test/org/apache/lucene/search/TestDocBoost.java @@ -66,7 +66,7 @@ public class TestDocBoost extends LuceneTestCase { final float[] scores = new float[4]; - new IndexSearcher(store).search + new IndexSearcher(store, true).search (new TermQuery(new Term("field", "word")), new Collector() { private int base = 0; diff --git a/src/test/org/apache/lucene/search/TestExplanations.java b/src/test/org/apache/lucene/search/TestExplanations.java index 6b029f1227d..746b86981a1 100644 --- a/src/test/org/apache/lucene/search/TestExplanations.java +++ b/src/test/org/apache/lucene/search/TestExplanations.java @@ -70,7 +70,7 @@ public class TestExplanations extends LuceneTestCase { writer.addDocument(doc); } writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); } protected String[] docFields = { diff --git a/src/test/org/apache/lucene/search/TestFieldCache.java b/src/test/org/apache/lucene/search/TestFieldCache.java index 7f08af7ff26..bbe95ab9670 100644 --- a/src/test/org/apache/lucene/search/TestFieldCache.java +++ b/src/test/org/apache/lucene/search/TestFieldCache.java @@ -57,7 +57,7 @@ public class TestFieldCache extends LuceneTestCase { writer.addDocument(doc); } writer.close(); - reader = IndexReader.open(directory); + reader = 
IndexReader.open(directory, true); } public void testInfoStream() throws Exception { diff --git a/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java index 55d71842998..1b3cbb1ee3b 100644 --- a/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java +++ b/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java @@ -49,7 +49,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testRangeFilterId() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int medId = ((maxId - minId) / 2); @@ -135,7 +135,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterRand() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); String minRP = pad(signedIndex.minR); @@ -198,7 +198,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterShorts() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -289,7 +289,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterInts() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -381,7 +381,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterLongs() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -475,7 +475,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterFloats() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -503,7 +503,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { public void testFieldCacheRangeFilterDoubles() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -545,7 +545,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { writer.deleteDocuments(new Term("id","0")); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); IndexSearcher search = new IndexSearcher(reader); assertTrue(reader.hasDeletions()); diff --git a/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java index f938ed7bf67..b550bb0a8b5 100644 --- a/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java +++ 
b/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java @@ -48,7 +48,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { } w.close(); - IndexReader reader = IndexReader.open(rd); + IndexReader reader = IndexReader.open(rd, true); IndexSearcher searcher = new IndexSearcher(reader); int numDocs = reader.numDocs(); ScoreDoc[] results; diff --git a/src/test/org/apache/lucene/search/TestFilteredSearch.java b/src/test/org/apache/lucene/search/TestFilteredSearch.java index 59a6e746920..473a89cb8c0 100644 --- a/src/test/org/apache/lucene/search/TestFilteredSearch.java +++ b/src/test/org/apache/lucene/search/TestFilteredSearch.java @@ -61,7 +61,7 @@ public class TestFilteredSearch extends LuceneTestCase { booleanQuery.add(new TermQuery(new Term(FIELD, "36")), BooleanClause.Occur.SHOULD); - IndexSearcher indexSearcher = new IndexSearcher(directory); + IndexSearcher indexSearcher = new IndexSearcher(directory, true); ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs; assertEquals("Number of matched documents", 1, hits.length); diff --git a/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/src/test/org/apache/lucene/search/TestFuzzyQuery.java index 868199afc6d..cbc2efdaa58 100644 --- a/src/test/org/apache/lucene/search/TestFuzzyQuery.java +++ b/src/test/org/apache/lucene/search/TestFuzzyQuery.java @@ -45,7 +45,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("ddddd", writer); writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; @@ -168,7 +168,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("segment", writer); writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); FuzzyQuery query; // not similar enough: @@ -257,7 +257,7 @@ public class TestFuzzyQuery extends LuceneTestCase { addDoc("segment", writer); writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); Query query; // term not over 10 chars, so optimization shortcuts diff --git a/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 09feb03163e..25eab92e970 100644 --- a/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -48,7 +48,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase { addDoc("three four", iw, 300f); iw.close(); - IndexReader ir = IndexReader.open(dir); + IndexReader ir = IndexReader.open(dir, true); IndexSearcher is = new IndexSearcher(ir); ScoreDoc[] hits; diff --git a/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index ab90b3746c7..b3accef6152 100644 --- a/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ b/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -59,7 +59,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); // search for "blueberry pi*": 
MultiPhraseQuery query1 = new MultiPhraseQuery(); @@ -69,7 +69,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase query2.add(new Term("body", "strawberry")); LinkedList termsWithPrefix = new LinkedList(); - IndexReader ir = IndexReader.open(indexStore); + IndexReader ir = IndexReader.open(indexStore, true); // this TermEnum gives "piccadilly", "pie" and "pizza". String prefix = "pi"; @@ -149,7 +149,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); // This query will be equivalent to +body:pie +body:"blue*" BooleanQuery q = new BooleanQuery(); q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST); @@ -175,7 +175,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase add("a note", "note", writer); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); // This query will be equivalent to +type:note +body:"a t*" BooleanQuery q = new BooleanQuery(); diff --git a/src/test/org/apache/lucene/search/TestMultiSearcher.java b/src/test/org/apache/lucene/search/TestMultiSearcher.java index fd5866ca0ac..3c19995e80e 100644 --- a/src/test/org/apache/lucene/search/TestMultiSearcher.java +++ b/src/test/org/apache/lucene/search/TestMultiSearcher.java @@ -109,8 +109,8 @@ public class TestMultiSearcher extends LuceneTestCase // building the searchables Searcher[] searchers = new Searcher[2]; // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index - searchers[0] = new IndexSearcher(indexStoreB); - searchers[1] = new IndexSearcher(indexStoreA); + searchers[0] = new IndexSearcher(indexStoreB, true); + searchers[1] = new IndexSearcher(indexStoreA, true); // creating the multiSearcher Searcher mSearcher = getMultiSearcherInstance(searchers); // performing the search @@ -138,8 +138,8 @@ public class TestMultiSearcher extends LuceneTestCase // building the searchables Searcher[] searchers2 = new Searcher[2]; // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index - searchers2[0] = new IndexSearcher(indexStoreB); - searchers2[1] = new IndexSearcher(indexStoreA); + searchers2[0] = new IndexSearcher(indexStoreB, true); + searchers2[1] = new IndexSearcher(indexStoreA, true); // creating the multiSearcher MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2); // performing the same search @@ -171,7 +171,7 @@ public class TestMultiSearcher extends LuceneTestCase // deleting the document just added, this will cause a different exception to take place Term term = new Term("id", "doc1"); - IndexReader readerB = IndexReader.open(indexStoreB); + IndexReader readerB = IndexReader.open(indexStoreB, false); readerB.deleteDocuments(term); readerB.close(); @@ -183,8 +183,8 @@ public class TestMultiSearcher extends LuceneTestCase // building the searchables Searcher[] searchers3 = new Searcher[2]; - searchers3[0] = new IndexSearcher(indexStoreB); - searchers3[1] = new IndexSearcher(indexStoreA); + searchers3[0] = new IndexSearcher(indexStoreB, true); + searchers3[1] = new IndexSearcher(indexStoreA, true); // creating the multiSearcher Searcher mSearcher3 = getMultiSearcherInstance(searchers3); // performing the same search @@ -241,8 +241,8 @@ public class TestMultiSearcher extends LuceneTestCase initIndex(ramDirectory1, 10, true, null); //
documents with a single token "doc0", "doc1", etc... initIndex(ramDirectory2, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc... - indexSearcher1 = new IndexSearcher(ramDirectory1); - indexSearcher2 = new IndexSearcher(ramDirectory2); + indexSearcher1 = new IndexSearcher(ramDirectory1, true); + indexSearcher2 = new IndexSearcher(ramDirectory2, true); MultiSearcher searcher = getMultiSearcherInstance(new Searcher[]{indexSearcher1, indexSearcher2}); assertTrue("searcher is null and it shouldn't be", searcher != null); @@ -297,7 +297,7 @@ public class TestMultiSearcher extends LuceneTestCase initIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc... initIndex(ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc... - indexSearcher1=new IndexSearcher(ramDirectory1); + indexSearcher1=new IndexSearcher(ramDirectory1, true); indexSearcher1.setDefaultFieldSortScoring(true, true); hits=indexSearcher1.search(query, null, 1000).scoreDocs; @@ -325,9 +325,9 @@ public class TestMultiSearcher extends LuceneTestCase initIndex(ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc... initIndex(ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc... - indexSearcher1=new IndexSearcher(ramDirectory1); + indexSearcher1=new IndexSearcher(ramDirectory1, true); indexSearcher1.setDefaultFieldSortScoring(true, true); - indexSearcher2=new IndexSearcher(ramDirectory2); + indexSearcher2=new IndexSearcher(ramDirectory2, true); indexSearcher2.setDefaultFieldSortScoring(true, true); Searcher searcher=getMultiSearcherInstance(new Searcher[] { indexSearcher1, indexSearcher2 }); @@ -363,7 +363,7 @@ public class TestMultiSearcher extends LuceneTestCase public void testCustomSimilarity () throws IOException { RAMDirectory dir = new RAMDirectory(); initIndex(dir, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc... 
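
The TestMultiSearcher hunks above condense the whole patch into one pattern: the deprecated one-argument IndexReader.open(Directory) and new IndexSearcher(Directory) are replaced by the explicit read-only variants introduced in 2.9. Tests that only search pass readOnly=true; tests that go on to call deleteDocuments() or setNorm() must pass readOnly=false, since a read-only reader rejects modifications (which is why the deleting reader above is opened with false). A minimal, self-contained sketch of the distinction, assuming a scratch RAMDirectory; class and field names here are illustrative, not taken from the patch:

    // Sketch only: the Lucene 2.9 read-only reader API this patch migrates to.
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.RAMDirectory;

    public class ReadOnlyReaderSketch {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
            true, IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("id", "0", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);
        writer.close();

        // Search-only code opens the reader read-only (readOnly == true);
        // any modification attempt on such a reader throws.
        IndexReader ro = IndexReader.open(dir, true);
        IndexSearcher searcher = new IndexSearcher(ro);
        searcher.close();
        ro.close();

        // Code that deletes documents or sets norms needs readOnly == false.
        IndexReader rw = IndexReader.open(dir, false);
        rw.deleteDocuments(new Term("id", "0"));
        rw.close();
      }
    }
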
- IndexSearcher srchr = new IndexSearcher(dir); + IndexSearcher srchr = new IndexSearcher(dir, true); MultiSearcher msrchr = getMultiSearcherInstance(new Searcher[]{srchr}); Similarity customSimilarity = new DefaultSimilarity() { diff --git a/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java b/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java index 605649abfcc..99f6c62a655 100644 --- a/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java +++ b/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java @@ -125,8 +125,8 @@ public class TestMultiSearcherRanking extends LuceneTestCase { iw2.close(); Searchable[] s = new Searchable[2]; - s[0] = new IndexSearcher(d1); - s[1] = new IndexSearcher(d2); + s[0] = new IndexSearcher(d1, true); + s[1] = new IndexSearcher(d2, true); multiSearcher = new MultiSearcher(s); // create IndexSearcher which contains all documents @@ -136,7 +136,7 @@ public class TestMultiSearcherRanking extends LuceneTestCase { addCollection1(iw); addCollection2(iw); iw.close(); - singleSearcher = new IndexSearcher(d); + singleSearcher = new IndexSearcher(d, true); } private void addCollection1(IndexWriter iw) throws IOException { diff --git a/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java index 1588f4e6fcb..6f766825c83 100644 --- a/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java +++ b/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java @@ -146,7 +146,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void testEqualScores() throws IOException { // NOTE: uses index build in *this* setUp - IndexReader reader = IndexReader.open(small); + IndexReader reader = IndexReader.open(small, true); IndexSearcher search = new IndexSearcher(reader); ScoreDoc[] result; @@ -175,7 +175,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void testBoost() throws IOException { // NOTE: uses index build in *this* setUp - IndexReader reader = IndexReader.open(small); + IndexReader reader = IndexReader.open(small, true); IndexSearcher search = new IndexSearcher(reader); // test for correct application of query normalization @@ -243,7 +243,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void testBooleanOrderUnAffected() throws IOException { // NOTE: uses index build in *this* setUp - IndexReader reader = IndexReader.open(small); + IndexReader reader = IndexReader.open(small, true); IndexSearcher search = new IndexSearcher(reader); // first do a regular TermRangeQuery which uses term expansion so @@ -274,7 +274,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void testRangeQueryId() throws IOException { // NOTE: uses index build in *super* setUp - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int medId = ((maxId - minId) / 2); @@ -401,7 +401,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { public void testRangeQueryIdCollating() throws IOException { // NOTE: uses index build in *super* setUp - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int medId = ((maxId - minId) / 2); @@ -484,7 +484,7 @@ public class TestMultiTermConstantScore extends 
BaseTestRangeFilter { public void testRangeQueryRand() throws IOException { // NOTE: uses index build in *super* setUp - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); String minRP = pad(signedIndex.minR); @@ -547,7 +547,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { // NOTE: uses index build in *super* setUp // using the unsigned index because collation seems to ignore hyphens - IndexReader reader = IndexReader.open(unsignedIndex.index); + IndexReader reader = IndexReader.open(unsignedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); String minRP = pad(unsignedIndex.minR); @@ -624,7 +624,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { writer.optimize(); writer.close(); - IndexReader reader = IndexReader.open(farsiIndex); + IndexReader reader = IndexReader.open(farsiIndex, true); IndexSearcher search = new IndexSearcher(reader); // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in @@ -668,7 +668,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { writer.optimize(); writer.close(); - IndexReader reader = IndexReader.open(danishIndex); + IndexReader reader = IndexReader.open(danishIndex, true); IndexSearcher search = new IndexSearcher(reader); Collator c = Collator.getInstance(new Locale("da", "dk")); diff --git a/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java index e45d8daef05..43b41253813 100644 --- a/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java +++ b/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java @@ -62,7 +62,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase { IndexReader reader = null; try { - reader = IndexReader.open(directory); + reader = IndexReader.open(directory, true); for(int i = 1; i <= numThreads; i++) testTermPositionVectors(reader, i); diff --git a/src/test/org/apache/lucene/search/TestNot.java b/src/test/org/apache/lucene/search/TestNot.java index 0a9a089e5f5..7d3b5a1a864 100644 --- a/src/test/org/apache/lucene/search/TestNot.java +++ b/src/test/org/apache/lucene/search/TestNot.java @@ -47,7 +47,7 @@ public class TestNot extends LuceneTestCase { writer.optimize(); writer.close(); - Searcher searcher = new IndexSearcher(store); + Searcher searcher = new IndexSearcher(store, true); QueryParser parser = new QueryParser("field", new SimpleAnalyzer()); Query query = parser.parse("a NOT b"); //System.out.println(query); diff --git a/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java index 92e437a4612..e5b22b98303 100644 --- a/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java +++ b/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java @@ -70,7 +70,7 @@ public class TestPhrasePrefixQuery writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); //PhrasePrefixQuery query1 = new PhrasePrefixQuery(); MultiPhraseQuery query1 = new MultiPhraseQuery(); @@ -80,7 +80,7 @@ public class TestPhrasePrefixQuery query2.add(new Term("body", "strawberry")); LinkedList termsWithPrefix = new LinkedList(); - IndexReader ir = IndexReader.open(indexStore); + IndexReader ir = IndexReader.open(indexStore, true); // this TermEnum gives 
"piccadilly", "pie" and "pizza". String prefix = "pi"; diff --git a/src/test/org/apache/lucene/search/TestPhraseQuery.java b/src/test/org/apache/lucene/search/TestPhraseQuery.java index 47bedf40552..0939aab579b 100644 --- a/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -77,7 +77,7 @@ public class TestPhraseQuery extends LuceneTestCase { writer.optimize(); writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); query = new PhraseQuery(); } @@ -209,7 +209,7 @@ public class TestPhraseQuery extends LuceneTestCase { writer.addDocument(doc); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); // valid exact phrase query PhraseQuery query = new PhraseQuery(); @@ -249,7 +249,7 @@ public class TestPhraseQuery extends LuceneTestCase { writer.optimize(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term("source", "marketing")); @@ -287,7 +287,7 @@ public class TestPhraseQuery extends LuceneTestCase { writer.optimize(); writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); termQuery = new TermQuery(new Term("contents","woo")); phraseQuery = new PhraseQuery(); @@ -338,7 +338,7 @@ public class TestPhraseQuery extends LuceneTestCase { writer.optimize(); writer.close(); - Searcher searcher = new IndexSearcher(directory); + Searcher searcher = new IndexSearcher(directory, true); PhraseQuery query = new PhraseQuery(); query.add(new Term("field", "firstname")); query.add(new Term("field", "lastname")); diff --git a/src/test/org/apache/lucene/search/TestPositionIncrement.java b/src/test/org/apache/lucene/search/TestPositionIncrement.java index df2ed36c88f..64b00e54e6d 100644 --- a/src/test/org/apache/lucene/search/TestPositionIncrement.java +++ b/src/test/org/apache/lucene/search/TestPositionIncrement.java @@ -92,7 +92,7 @@ public class TestPositionIncrement extends BaseTokenStreamTestCase { writer.close(); - IndexSearcher searcher = new IndexSearcher(store); + IndexSearcher searcher = new IndexSearcher(store, true); TermPositions pos = searcher.getIndexReader().termPositions(new Term("field", "1")); pos.next(); diff --git a/src/test/org/apache/lucene/search/TestPrefixFilter.java b/src/test/org/apache/lucene/search/TestPrefixFilter.java index a4398d5f117..96cf6ad7754 100644 --- a/src/test/org/apache/lucene/search/TestPrefixFilter.java +++ b/src/test/org/apache/lucene/search/TestPrefixFilter.java @@ -48,7 +48,7 @@ public class TestPrefixFilter extends LuceneTestCase { // PrefixFilter combined with ConstantScoreQuery PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers")); Query query = new ConstantScoreQuery(filter); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(4, hits.length); diff --git a/src/test/org/apache/lucene/search/TestPrefixQuery.java b/src/test/org/apache/lucene/search/TestPrefixQuery.java index 59db1ae7bbb..05b12a0fa87 100644 --- a/src/test/org/apache/lucene/search/TestPrefixQuery.java +++ b/src/test/org/apache/lucene/search/TestPrefixQuery.java @@ -45,7 +45,7 @@ public class TestPrefixQuery extends LuceneTestCase { 
writer.close(); PrefixQuery query = new PrefixQuery(new Term("category", "/Computers")); - IndexSearcher searcher = new IndexSearcher(directory); + IndexSearcher searcher = new IndexSearcher(directory, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("All documents in /Computers category and below", 3, hits.length); diff --git a/src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java b/src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java deleted file mode 100644 index 6a4c7c89bb1..00000000000 --- a/src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java +++ /dev/null @@ -1,182 +0,0 @@ -package org.apache.lucene.search; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.ConcurrentModificationException; - -import org.apache.lucene.util.LuceneTestCase; - -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.Hits; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.RAMDirectory; - -/** - * Test Hits searches with interleaved deletions. - * - * See {@link http://issues.apache.org/jira/browse/LUCENE-1096}. - * @deprecated Hits will be removed in Lucene 3.0 - */ -public class TestSearchHitsWithDeletions extends LuceneTestCase { - - private static boolean VERBOSE = false; - private static final String TEXT_FIELD = "text"; - private static final int N = 16100; - - private static Directory directory; - - public void setUp() throws Exception { - super.setUp(); - // Create an index writer. - directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - for (int i=0; i0 && i%k==0)))) { - Document doc = hits.doc(id); - log("Deleting hit "+i+" - doc "+doc+" with id "+id); - reader.deleteDocument(id); - } - if (intermittent) { - // check internal behavior of Hits (go 50 ahead of getMoreDocs points because the deletions cause to use more of the available hits) - if (i==150 || i==450 || i==1650) { - assertTrue("Hit "+i+": hits should have checked for deletions in last call to getMoreDocs()",hits.debugCheckedForDeletions); - } else if (i==50 || i==250 || i==850) { - assertFalse("Hit "+i+": hits should have NOT checked for deletions in last call to getMoreDocs()",hits.debugCheckedForDeletions); - } - } - } - } catch (ConcurrentModificationException e) { - // this is the only valid exception, and only when deletng in front. 
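
The file removed here, TestSearchHitsWithDeletions, exercised the deprecated Hits class, which is scheduled for removal in 3.0 (see the @deprecated note in its header). For orientation, a minimal sketch of the replacement idiom the surviving tests already use: request an explicit result window as TopDocs and walk the ScoreDoc array, which is a stable snapshot rather than a lazily refetched view. Field and query names below are illustrative:

    // Sketch only: TopDocs/ScoreDoc in place of the deprecated Hits class.
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.RAMDirectory;

    public class TopDocsSketch {
      static void searchAll(RAMDirectory dir) throws Exception {
        IndexSearcher searcher = new IndexSearcher(dir, true);
        // Collect up to 1000 hits in one call; unlike Hits, the returned
        // TopDocs is not affected by deletions made after the search.
        TopDocs td = searcher.search(new TermQuery(new Term("text", "document")),
                                     null, 1000);
        for (int i = 0; i < td.scoreDocs.length; i++) {
          Document doc = searcher.doc(td.scoreDocs[i].doc); // stored fields by doc id
        }
        searcher.close();
      }
    }
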
- assertTrue(e.getMessage()+" not expected unless deleting hits that were not yet seen!",deleteInFront); - } - searcher.close(); - } - - private static Document createDocument(int id) { - Document doc = new Document(); - doc.add(new Field(TEXT_FIELD, "text of document"+id, Field.Store.YES, Field.Index.ANALYZED)); - return doc; - } - - private static void log (String s) { - if (VERBOSE) { - System.out.println(s); - } - } -} diff --git a/src/test/org/apache/lucene/search/TestSetNorm.java b/src/test/org/apache/lucene/search/TestSetNorm.java index 5417fb84813..f27e5dc41fd 100644 --- a/src/test/org/apache/lucene/search/TestSetNorm.java +++ b/src/test/org/apache/lucene/search/TestSetNorm.java @@ -52,7 +52,7 @@ public class TestSetNorm extends LuceneTestCase { writer.close(); // reset the boost of each instance of this document - IndexReader reader = IndexReader.open(store); + IndexReader reader = IndexReader.open(store, false); reader.setNorm(0, "field", 1.0f); reader.setNorm(1, "field", 2.0f); reader.setNorm(2, "field", 4.0f); @@ -62,7 +62,7 @@ public class TestSetNorm extends LuceneTestCase { // check that searches are ordered by this boost final float[] scores = new float[4]; - new IndexSearcher(store).search + new IndexSearcher(store, true).search (new TermQuery(new Term("field", "word")), new Collector() { private int base = 0; diff --git a/src/test/org/apache/lucene/search/TestSimilarity.java b/src/test/org/apache/lucene/search/TestSimilarity.java index 6f2f59861ae..60a66e15c37 100644 --- a/src/test/org/apache/lucene/search/TestSimilarity.java +++ b/src/test/org/apache/lucene/search/TestSimilarity.java @@ -67,7 +67,7 @@ public class TestSimilarity extends LuceneTestCase { writer.optimize(); writer.close(); - Searcher searcher = new IndexSearcher(store); + Searcher searcher = new IndexSearcher(store, true); searcher.setSimilarity(new SimpleSimilarity()); Term a = new Term("field", "a"); diff --git a/src/test/org/apache/lucene/search/TestSimpleExplanations.java b/src/test/org/apache/lucene/search/TestSimpleExplanations.java index 999b2ceb892..7f12ddbeb37 100644 --- a/src/test/org/apache/lucene/search/TestSimpleExplanations.java +++ b/src/test/org/apache/lucene/search/TestSimpleExplanations.java @@ -331,8 +331,8 @@ public class TestSimpleExplanations extends TestExplanations { Query query = parser.parse("handle:1"); Searcher[] searchers = new Searcher[2]; - searchers[0] = new IndexSearcher(indexStoreB); - searchers[1] = new IndexSearcher(indexStoreA); + searchers[0] = new IndexSearcher(indexStoreB, true); + searchers[1] = new IndexSearcher(indexStoreA, true); Searcher mSearcher = new MultiSearcher(searchers); ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs; diff --git a/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java index d8c3a951c84..92519036b17 100755 --- a/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java +++ b/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java @@ -122,7 +122,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { writer.addDocument(doc); writer.close(); - IndexSearcher searcher = new IndexSearcher(ramDir); + IndexSearcher searcher = new IndexSearcher(ramDir, true); TopDocs td = searcher.search(query,null,10); //System.out.println("slop: "+slop+" query: "+query+" doc: "+doc+" Expecting number of hits: "+expectedNumResults+" maxScore="+td.getMaxScore()); assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", 
expectedNumResults, td.totalHits); diff --git a/src/test/org/apache/lucene/search/TestSort.java b/src/test/org/apache/lucene/search/TestSort.java index a873f88a07f..da9ab2b4b83 100644 --- a/src/test/org/apache/lucene/search/TestSort.java +++ b/src/test/org/apache/lucene/search/TestSort.java @@ -624,19 +624,6 @@ public class TestSort extends LuceneTestCase implements Serializable { sort.setSort (new SortField ("i18n", new Locale("da", "dk"))); assertMatches (multiSearcher, queryY, sort, "BJDHF"); } - - // test a custom sort function - public void testCustomSorts() throws Exception { - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource())); - assertMatches (full, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true)); - assertMatches (full, queryY, sort, "HJDBF"); - SortComparator custom = SampleComparable.getComparator(); - sort.setSort (new SortField ("custom", custom)); - assertMatches (full, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", custom, true)); - assertMatches (full, queryY, sort, "HJDBF"); - } // test a variety of sorts using more than one searcher public void testMultiSort() throws Exception { diff --git a/src/test/org/apache/lucene/search/TestSpanQueryFilter.java b/src/test/org/apache/lucene/search/TestSpanQueryFilter.java index 134d91dc5d0..4b2104e70ee 100644 --- a/src/test/org/apache/lucene/search/TestSpanQueryFilter.java +++ b/src/test/org/apache/lucene/search/TestSpanQueryFilter.java @@ -50,7 +50,7 @@ public class TestSpanQueryFilter extends LuceneTestCase { } writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim())); SpanQueryFilter filter = new SpanQueryFilter(query); diff --git a/src/test/org/apache/lucene/search/TestStressSort.java b/src/test/org/apache/lucene/search/TestStressSort.java index a81e36cf10d..bcf09637c9d 100644 --- a/src/test/org/apache/lucene/search/TestStressSort.java +++ b/src/test/org/apache/lucene/search/TestStressSort.java @@ -165,20 +165,20 @@ public class TestStressSort extends LuceneTestCase { } } writer.close(); - searcherMultiSegment = new IndexSearcher(dir); + searcherMultiSegment = new IndexSearcher(dir, true); searcherMultiSegment.setDefaultFieldSortScoring(true, true); dir2 = new MockRAMDirectory(dir); writer = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); - searcherSingleSegment = new IndexSearcher(dir2); + searcherSingleSegment = new IndexSearcher(dir2, true); searcherSingleSegment.setDefaultFieldSortScoring(true, true); dir3 = new MockRAMDirectory(dir); writer = new IndexWriter(dir3, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.optimize(3); writer.close(); - searcherFewSegment = new IndexSearcher(dir3); + searcherFewSegment = new IndexSearcher(dir3, true); searcherFewSegment.setDefaultFieldSortScoring(true, true); } diff --git a/src/test/org/apache/lucene/search/TestTermRangeFilter.java b/src/test/org/apache/lucene/search/TestTermRangeFilter.java index 0a9fe3cf632..d544384840f 100644 --- a/src/test/org/apache/lucene/search/TestTermRangeFilter.java +++ b/src/test/org/apache/lucene/search/TestTermRangeFilter.java @@ -49,7 +49,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { public void testRangeFilterId() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); 
+ IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); int medId = ((maxId - minId) / 2); @@ -131,7 +131,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { public void testRangeFilterIdCollating() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); Collator c = Collator.getInstance(Locale.ENGLISH); @@ -214,7 +214,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { public void testRangeFilterRand() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index); + IndexReader reader = IndexReader.open(signedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); String minRP = pad(signedIndex.minR); @@ -277,7 +277,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { public void testRangeFilterRandCollating() throws IOException { // using the unsigned index because collation seems to ignore hyphens - IndexReader reader = IndexReader.open(unsignedIndex.index); + IndexReader reader = IndexReader.open(unsignedIndex.index, true); IndexSearcher search = new IndexSearcher(reader); Collator c = Collator.getInstance(Locale.ENGLISH); @@ -354,7 +354,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { writer.optimize(); writer.close(); - IndexReader reader = IndexReader.open(farsiIndex); + IndexReader reader = IndexReader.open(farsiIndex, true); IndexSearcher search = new IndexSearcher(reader); Query q = new TermQuery(new Term("body","body")); @@ -398,7 +398,7 @@ public class TestTermRangeFilter extends BaseTestRangeFilter { writer.optimize(); writer.close(); - IndexReader reader = IndexReader.open(danishIndex); + IndexReader reader = IndexReader.open(danishIndex, true); IndexSearcher search = new IndexSearcher(reader); Query q = new TermQuery(new Term("body","body")); diff --git a/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/src/test/org/apache/lucene/search/TestTermRangeQuery.java index 383a71581a6..a9db305147a 100644 --- a/src/test/org/apache/lucene/search/TestTermRangeQuery.java +++ b/src/test/org/apache/lucene/search/TestTermRangeQuery.java @@ -48,19 +48,19 @@ public class TestTermRangeQuery extends LuceneTestCase { public void testExclusive() throws Exception { Query query = new TermRangeQuery("content", "A", "C", false, false); initializeIndex(new String[] {"A", "B", "C", "D"}); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D, only B in range", 1, hits.length); searcher.close(); initializeIndex(new String[] {"A", "B", "D"}); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,D, only B in range", 1, hits.length); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("C added, still only B in range", 1, hits.length); searcher.close(); @@ -70,14 +70,14 @@ public class TestTermRangeQuery extends LuceneTestCase { public void testDeprecatedCstrctors() throws IOException { Query query = new RangeQuery(null, new Term("content","C"), false); initializeIndex(new String[] {"A", "B", "C", "D"}); - IndexSearcher searcher = new 
IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D, only B in range", 2, hits.length); searcher.close(); query = new RangeQuery(new Term("content","C"),null, false); initializeIndex(new String[] {"A", "B", "C", "D"}); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D, only B in range", 1, hits.length); searcher.close(); @@ -87,19 +87,19 @@ public class TestTermRangeQuery extends LuceneTestCase { Query query = new TermRangeQuery("content", "A", "C", true, true); initializeIndex(new String[]{"A", "B", "C", "D"}); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D - A,B,C in range", 3, hits.length); searcher.close(); initializeIndex(new String[]{"A", "B", "D"}); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,D - A and B in range", 2, hits.length); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("C added - A, B, C in range", 3, hits.length); searcher.close(); @@ -154,19 +154,19 @@ public class TestTermRangeQuery extends LuceneTestCase { public void testExclusiveCollating() throws Exception { Query query = new TermRangeQuery("content", "A", "C", false, false, Collator.getInstance(Locale.ENGLISH)); initializeIndex(new String[] {"A", "B", "C", "D"}); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D, only B in range", 1, hits.length); searcher.close(); initializeIndex(new String[] {"A", "B", "D"}); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,D, only B in range", 1, hits.length); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("C added, still only B in range", 1, hits.length); searcher.close(); @@ -176,19 +176,19 @@ public class TestTermRangeQuery extends LuceneTestCase { Query query = new TermRangeQuery("content", "A", "C",true, true, Collator.getInstance(Locale.ENGLISH)); initializeIndex(new String[]{"A", "B", "C", "D"}); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,C,D - A,B,C in range", 3, hits.length); searcher.close(); initializeIndex(new String[]{"A", "B", "D"}); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("A,B,D - A and B in range", 2, hits.length); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("C added - A, B, C in range", 3, hits.length); searcher.close(); @@ -205,7 +205,7 @@ public class TestTermRangeQuery extends LuceneTestCase { // index Term below should NOT 
be returned by a TermRangeQuery with a Farsi // Collator (or an Arabic one for the case when Farsi is not supported). initializeIndex(new String[]{ "\u0633\u0627\u0628"}); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("The index Term should not be included.", 0, hits.length); @@ -225,7 +225,7 @@ public class TestTermRangeQuery extends LuceneTestCase { // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ], // but Danish collation does. initializeIndex(words); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("The index Term should be included.", 1, hits.length); @@ -318,7 +318,7 @@ public class TestTermRangeQuery extends LuceneTestCase { Query query = new TermRangeQuery("content", null, "C", false, false); initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); Hits hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("A,B,,C,D => A, B & are in range", 3, hits.length()); @@ -327,7 +327,7 @@ public class TestTermRangeQuery extends LuceneTestCase { searcher.close(); initializeIndex(new String[] {"A", "B", "", "D"}, analyzer); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("A,B,,D => A, B & are in range", 3, hits.length()); @@ -335,7 +335,7 @@ public class TestTermRangeQuery extends LuceneTestCase { //assertEquals("A,B,,D => A, B & are in range", 2, hits.length()); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("C added, still A, B & are in range", 3, hits.length()); @@ -350,7 +350,7 @@ public class TestTermRangeQuery extends LuceneTestCase { Analyzer analyzer = new SingleCharAnalyzer(); Query query = new TermRangeQuery("content", null, "C", true, true); initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); Hits hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("A,B,,C,D => A,B,,C in range", 4, hits.length()); @@ -358,7 +358,7 @@ public class TestTermRangeQuery extends LuceneTestCase { //assertEquals("A,B,,C,D => A,B,,C in range", 3, hits.length()); searcher.close(); initializeIndex(new String[]{"A", "B", "", "D"}, analyzer); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("A,B,,D - A, B and in range", 3, hits.length()); @@ -366,7 +366,7 @@ public class TestTermRangeQuery extends LuceneTestCase { //assertEquals("A,B,,D => A, B and in range", 2, hits.length()); searcher.close(); addDoc("C"); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(dir, true); hits = searcher.search(query); // When Lucene-38 is fixed, use the assert on the next line: assertEquals("C added => A,B,,C in range", 4, hits.length()); diff --git 
a/src/test/org/apache/lucene/search/TestTermScorer.java b/src/test/org/apache/lucene/search/TestTermScorer.java index c57933899d1..46f54629506 100644 --- a/src/test/org/apache/lucene/search/TestTermScorer.java +++ b/src/test/org/apache/lucene/search/TestTermScorer.java @@ -58,7 +58,7 @@ public class TestTermScorer extends LuceneTestCase writer.addDocument(doc); } writer.close(); - indexSearcher = new IndexSearcher(directory); + indexSearcher = new IndexSearcher(directory, false); indexReader = indexSearcher.getIndexReader(); diff --git a/src/test/org/apache/lucene/search/TestTermVectors.java b/src/test/org/apache/lucene/search/TestTermVectors.java index aba51bd1bdd..d6ab2dbb227 100644 --- a/src/test/org/apache/lucene/search/TestTermVectors.java +++ b/src/test/org/apache/lucene/search/TestTermVectors.java @@ -67,7 +67,7 @@ public class TestTermVectors extends LuceneTestCase { writer.addDocument(doc); } writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); } public void test() { @@ -101,7 +101,7 @@ public class TestTermVectors extends LuceneTestCase { doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); writer.close(); - IndexReader reader = IndexReader.open(dir); + IndexReader reader = IndexReader.open(dir, true); TermFreqVector[] v = reader.getTermFreqVectors(0); assertEquals(4, v.length); String[] expectedFields = new String[]{"a", "b", "c", "x"}; @@ -240,7 +240,7 @@ public class TestTermVectors extends LuceneTestCase { writer.addDocument(testDoc3); writer.addDocument(testDoc4); writer.close(); - IndexSearcher knownSearcher = new IndexSearcher(dir); + IndexSearcher knownSearcher = new IndexSearcher(dir, true); TermEnum termEnum = knownSearcher.reader.terms(); TermDocs termDocs = knownSearcher.reader.termDocs(); //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length); @@ -366,7 +366,7 @@ public class TestTermVectors extends LuceneTestCase { } writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); Query query = new TermQuery(new Term("field", "hundred")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; @@ -398,7 +398,7 @@ public class TestTermVectors extends LuceneTestCase { writer.addDocument(doc); writer.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); Query query = new TermQuery(new Term("field", "one")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; diff --git a/src/test/org/apache/lucene/search/TestThreadSafe.java b/src/test/org/apache/lucene/search/TestThreadSafe.java index 77458fb0aca..fd222b6fccc 100755 --- a/src/test/org/apache/lucene/search/TestThreadSafe.java +++ b/src/test/org/apache/lucene/search/TestThreadSafe.java @@ -151,7 +151,7 @@ public class TestThreadSafe extends LuceneTestCase { // do many small tests so the thread locals go away inbetween for (int i=0; i<100; i++) { - ir1 = IndexReader.open(dir1); + ir1 = IndexReader.open(dir1, false); doTest(10,100); } } diff --git a/src/test/org/apache/lucene/search/TestTimeLimitedCollector.java b/src/test/org/apache/lucene/search/TestTimeLimitedCollector.java deleted file mode 100755 index 8637989bbe0..00000000000 --- a/src/test/org/apache/lucene/search/TestTimeLimitedCollector.java +++ /dev/null @@ -1,328 +0,0 @@ -package org.apache.lucene.search; - -/** - * Licensed to the Apache Software Foundation (ASF) under one 
or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; -import org.apache.lucene.queryParser.QueryParser; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.LuceneTestCase; - -import java.io.IOException; -import java.util.BitSet; - -/** - * Tests the TimeLimitedCollector. This test checks (1) search - * correctness (regardless of timeout), (2) expected timeout behavior, - * and (3) a sanity test with multiple searching threads. - */ -public class TestTimeLimitedCollector extends LuceneTestCase { - private static final int SLOW_DOWN = 47; - private static final long TIME_ALLOWED = 17 * SLOW_DOWN; // so searches can find about 17 docs. - - // max time allowed is relaxed for multithreading tests. - // the multithread case fails when setting this to 1 (no slack) and launching many threads (>2000). - // but this is not a real failure, just noise. - private static final double MULTI_THREAD_SLACK = 7; - - private static final int N_DOCS = 3000; - private static final int N_THREADS = 50; - - private Searcher searcher; - private final String FIELD_NAME = "body"; - private Query query; - - public TestTimeLimitedCollector(String name) { - super(name); - } - - /** - * initializes searcher with a document set - */ - protected void setUp() throws Exception { - super.setUp(); - final String docText[] = { - "docThatNeverMatchesSoWeCanRequireLastDocCollectedToBeGreaterThanZero", - "one blah three", - "one foo three multiOne", - "one foobar three multiThree", - "blueberry pancakes", - "blueberry pie", - "blueberry strudel", - "blueberry pizza", - }; - Directory directory = new RAMDirectory(); - IndexWriter iw = new IndexWriter(directory, new WhitespaceAnalyzer(), true, MaxFieldLength.UNLIMITED); - - for (int i=0; i 0!", exceptionDoc > 0 ); - if (greedy) { - assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" != lastCollected="+lastCollected, exceptionDoc==lastCollected); - assertTrue("greedy, but no hits found!", myHc.hitCount() > 0 ); - } else { - assertTrue("greedy="+greedy+" exceptionDoc="+exceptionDoc+" not > lastCollected="+lastCollected, exceptionDoc>lastCollected); - } - - // verify that elapsed time at exception is within valid limits - assertEquals( timoutException.getTimeAllowed(), TIME_ALLOWED); - // a) Not too early - assertTrue ( "elapsed="+timoutException.getTimeElapsed()+" <= (allowed-resolution)="+(TIME_ALLOWED-TimeLimitedCollector.getResolution()), - timoutException.getTimeElapsed() > TIME_ALLOWED-TimeLimitedCollector.getResolution()); - // b) Not too late. 
- // This part is problematic in a busy test system, so we just print a warning. - // We already verified that a timeout occurred, we just can't be picky about how long it took. - if (timoutException.getTimeElapsed() > maxTime(multiThreaded)) { - System.out.println("Informative: timeout exceeded (no action required: most probably just " + - " because the test machine is slower than usual): " + - "lastDoc="+exceptionDoc+ - " ,&& allowed="+timoutException.getTimeAllowed() + - " ,&& elapsed="+timoutException.getTimeElapsed() + - " >= " + maxTimeStr(multiThreaded)); - } - } - - private long maxTime(boolean multiThreaded) { - long res = 2 * TimeLimitedCollector.getResolution() + TIME_ALLOWED + SLOW_DOWN; // some slack for less noise in this test - if (multiThreaded) { - res *= MULTI_THREAD_SLACK; // larger slack - } - return res; - } - - private String maxTimeStr(boolean multiThreaded) { - String s = - "( " + - "2*resolution + TIME_ALLOWED + SLOW_DOWN = " + - "2*" + TimeLimitedCollector.getResolution() + " + " + TIME_ALLOWED + " + " + SLOW_DOWN + - ")"; - if (multiThreaded) { - s = MULTI_THREAD_SLACK + " * "+s; - } - return maxTime(multiThreaded) + " = " + s; - } - - /** - * Test timeout behavior when resolution is modified. - */ - public void testModifyResolution() { - try { - // increase and test - long resolution = 20 * TimeLimitedCollector.DEFAULT_RESOLUTION; //400 - TimeLimitedCollector.setResolution(resolution); - assertEquals(resolution, TimeLimitedCollector.getResolution()); - doTestTimeout(false,true); - // decrease much and test - resolution = 5; - TimeLimitedCollector.setResolution(resolution); - assertEquals(resolution, TimeLimitedCollector.getResolution()); - doTestTimeout(false,true); - // return to default and test - resolution = TimeLimitedCollector.DEFAULT_RESOLUTION; - TimeLimitedCollector.setResolution(resolution); - assertEquals(resolution, TimeLimitedCollector.getResolution()); - doTestTimeout(false,true); - } finally { - TimeLimitedCollector.setResolution(TimeLimitedCollector.DEFAULT_RESOLUTION); - } - } - - /** - * Test correctness with multiple searching threads. - */ - public void testSearchMultiThreaded() throws Exception { - doTestMultiThreads(false); - } - - /** - * Test correctness with multiple searching threads. - */ - public void testTimeoutMultiThreaded() throws Exception { - doTestMultiThreads(true); - } - - private void doTestMultiThreads(final boolean withTimeout) throws Exception { - Thread [] threadArray = new Thread[N_THREADS]; - final BitSet success = new BitSet(N_THREADS); - for( int i = 0; i < threadArray.length; ++i ) { - final int num = i; - threadArray[num] = new Thread() { - public void run() { - if (withTimeout) { - doTestTimeout(true,true); - } else { - doTestSearch(); - } - synchronized(success) { - success.set(num); - } - } - }; - } - for( int i = 0; i < threadArray.length; ++i ) { - threadArray[i].start(); - } - for( int i = 0; i < threadArray.length; ++i ) { - threadArray[i].join(); - } - assertEquals("some threads failed!", N_THREADS,success.cardinality()); - } - - // counting hit collector that can slow down at collect(). 
- private class MyHitCollector extends HitCollector - { - private final BitSet bits = new BitSet(); - private int slowdown = 0; - private int lastDocCollected = -1; - - /** - * amount of time to wait on each collect to simulate a long iteration - */ - public void setSlowDown( int milliseconds ) { - slowdown = milliseconds; - } - - public void collect( final int docId, final float score ) { - if( slowdown > 0 ) { - try { - Thread.sleep(slowdown); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - throw new RuntimeException(ie); - } - } - assert docId >= 0: " doc=" + docId; - bits.set( docId ); - lastDocCollected = docId; - } - - public int hitCount() { - return bits.cardinality(); - } - - public int getLastDocCollected() { - return lastDocCollected; - } - } - -} - diff --git a/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java index 79de32172c0..47d01078d7d 100644 --- a/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java +++ b/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java @@ -79,7 +79,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase { add(docText[i%docText.length], iw); } iw.close(); - searcher = new IndexSearcher(directory); + searcher = new IndexSearcher(directory, true); String qtxt = "one"; for (int i = 0; i < docText.length; i++) { diff --git a/src/test/org/apache/lucene/search/TestTopDocsCollector.java b/src/test/org/apache/lucene/search/TestTopDocsCollector.java index 624f725dd36..5bace9a2000 100644 --- a/src/test/org/apache/lucene/search/TestTopDocsCollector.java +++ b/src/test/org/apache/lucene/search/TestTopDocsCollector.java @@ -91,7 +91,7 @@ public class TestTopDocsCollector extends LuceneTestCase { private TopDocsCollector doSearch(int numResults) throws IOException { Query q = new MatchAllDocsQuery(); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); TopDocsCollector tdc = new MyTopsDocCollector(numResults); searcher.search(q, tdc); searcher.close(); diff --git a/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java b/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java index 3ab3f37a84d..6c70a6d8ef3 100644 --- a/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java +++ b/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java @@ -64,7 +64,7 @@ public class TestTopScoreDocCollector extends LuceneTestCase { bq.setMinimumNumberShouldMatch(1); try { - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); for (int i = 0; i < inOrder.length; i++) { TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]); assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName()); diff --git a/src/test/org/apache/lucene/search/TestWildcard.java b/src/test/org/apache/lucene/search/TestWildcard.java index 8254486c9e5..207bee72b15 100644 --- a/src/test/org/apache/lucene/search/TestWildcard.java +++ b/src/test/org/apache/lucene/search/TestWildcard.java @@ -65,7 +65,7 @@ public class TestWildcard */ public void testTermWithoutWildcard() throws IOException { RAMDirectory indexStore = getIndexStore("field", new String[]{"nowildcard", "nowildcardx"}); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); Query wq = new WildcardQuery(new Term("field", "nowildcard")); 
assertMatches(searcher, wq, 1); @@ -81,7 +81,7 @@ public class TestWildcard throws IOException { RAMDirectory indexStore = getIndexStore("body", new String[] {"metal", "metals"}); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); Query query1 = new TermQuery(new Term("body", "metal")); Query query2 = new WildcardQuery(new Term("body", "metal*")); Query query3 = new WildcardQuery(new Term("body", "m*tal")); @@ -120,7 +120,7 @@ public class TestWildcard throws IOException { RAMDirectory indexStore = getIndexStore("body", new String[] {"metal", "metals", "mXtals", "mXtXls"}); - IndexSearcher searcher = new IndexSearcher(indexStore); + IndexSearcher searcher = new IndexSearcher(indexStore, true); Query query1 = new WildcardQuery(new Term("body", "m?tal")); Query query2 = new WildcardQuery(new Term("body", "metal?")); Query query3 = new WildcardQuery(new Term("body", "metals?")); @@ -205,7 +205,7 @@ public class TestWildcard } iw.close(); - IndexSearcher searcher = new IndexSearcher(dir); + IndexSearcher searcher = new IndexSearcher(dir, true); // test queries that must find all for (int i = 0; i < matchAll.length; i++) { diff --git a/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java b/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java index 8670623c36d..a079945e69d 100755 --- a/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java +++ b/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java @@ -137,7 +137,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup { // Test that FieldScoreQuery returns docs with expected score. private void doTestCustomScore (String field, FieldScoreQuery.Type tp, double dboost) throws CorruptIndexException, Exception { float boost = (float) dboost; - IndexSearcher s = new IndexSearcher(dir); + IndexSearcher s = new IndexSearcher(dir, true); FieldScoreQuery qValSrc = new FieldScoreQuery(field,tp); // a query that would score by the field QueryParser qp = new QueryParser(TEXT_FIELD,anlzr); String qtxt = "first aid text"; // from the doc texts in FunctionQuerySetup. diff --git a/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java b/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java index 8f8ec950642..d59996032dc 100755 --- a/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java +++ b/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java @@ -72,7 +72,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup { // Test that FieldScoreQuery returns docs in expected order. private void doTestRank (String field, FieldScoreQuery.Type tp) throws CorruptIndexException, Exception { - IndexSearcher s = new IndexSearcher(dir); + IndexSearcher s = new IndexSearcher(dir, true); Query q = new FieldScoreQuery(field,tp); log("test: "+q); QueryUtils.check(q,s); @@ -115,7 +115,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup { // Test that FieldScoreQuery returns docs with expected score. 
 private void doTestExactScore (String field, FieldScoreQuery.Type tp) throws CorruptIndexException, Exception {
-    IndexSearcher s = new IndexSearcher(dir);
+    IndexSearcher s = new IndexSearcher(dir, true);
     Query q = new FieldScoreQuery(field,tp);
     TopDocs td = s.search(q,null,1000);
     assertEquals("All docs should be matched!",N_DOCS,td.totalHits);
@@ -163,7 +163,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
     expectedArrayTypes.put(FieldScoreQuery.Type.INT, new int[0]);
     expectedArrayTypes.put(FieldScoreQuery.Type.FLOAT, new float[0]);
-    IndexSearcher s = new IndexSearcher(dir);
+    IndexSearcher s = new IndexSearcher(dir, true);
     Object innerArray = null;
 
     boolean warned = false; // print warning once.
@@ -199,7 +199,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
     }
 
     // verify new values are reloaded (not reused) for a new reader
-    s = new IndexSearcher(dir);
+    s = new IndexSearcher(dir, true);
     FieldScoreQuery q = new FieldScoreQuery(field,tp);
     ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
     assertEquals("All docs should be matched!",N_DOCS,h.length);
diff --git a/src/test/org/apache/lucene/search/function/TestOrdValues.java b/src/test/org/apache/lucene/search/function/TestOrdValues.java
index 6def674a93c..b343ecd1ff7 100644
--- a/src/test/org/apache/lucene/search/function/TestOrdValues.java
+++ b/src/test/org/apache/lucene/search/function/TestOrdValues.java
@@ -55,7 +55,7 @@ public class TestOrdValues extends FunctionTestSetup {
 
   // Test that queries based on reverse/ordFieldScore scores correctly
   private void doTestRank (String field, boolean inOrder) throws CorruptIndexException, Exception {
-    IndexSearcher s = new IndexSearcher(dir);
+    IndexSearcher s = new IndexSearcher(dir, true);
     ValueSource vs;
     if (inOrder) {
       vs = new MultiValueSource(new OrdFieldSource(field));
@@ -98,7 +98,7 @@ public class TestOrdValues extends FunctionTestSetup {
 
   // Test that queries based on reverse/ordFieldScore returns docs with expected score.
   private void doTestExactScore (String field, boolean inOrder) throws CorruptIndexException, Exception {
-    IndexSearcher s = new IndexSearcher(dir);
+    IndexSearcher s = new IndexSearcher(dir, true);
     ValueSource vs;
     if (inOrder) {
       vs = new OrdFieldSource(field);
     } else {
@@ -135,7 +135,7 @@ public class TestOrdValues extends FunctionTestSetup {
 
   // Test that values loaded for FieldScoreQuery are cached properly and consumes the proper RAM resources.
   private void doTestCaching (String field, boolean inOrder) throws CorruptIndexException, Exception {
-    IndexSearcher s = new IndexSearcher(dir);
+    IndexSearcher s = new IndexSearcher(dir, true);
     Object innerArray = null;
 
     boolean warned = false; // print warning once
@@ -205,7 +205,7 @@ public class TestOrdValues extends FunctionTestSetup {
     }
 
     // verify new values are reloaded (not reused) for a new reader
-    s = new IndexSearcher(dir);
+    s = new IndexSearcher(dir, true);
     if (inOrder) {
       vs = new OrdFieldSource(field);
     } else {
diff --git a/src/test/org/apache/lucene/search/payloads/PayloadHelper.java b/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
index c5f27b7584a..9a37b5407f4 100644
--- a/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
+++ b/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
@@ -116,7 +116,7 @@ public class PayloadHelper {
     //writer.optimize();
     writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(directory);
+    IndexSearcher searcher = new IndexSearcher(directory, true);
     searcher.setSimilarity(similarity);
     return searcher;
   }
diff --git a/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java b/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
deleted file mode 100644
index 1c0b1bc6a87..00000000000
--- a/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
+++ /dev/null
@@ -1,245 +0,0 @@
-package org.apache.lucene.search.payloads;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.io.Reader;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Payload;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.CheckHits;
-import org.apache.lucene.search.DefaultSimilarity;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.spans.Spans;
-import org.apache.lucene.search.spans.TermSpans;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.English;
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestBoostingTermQuery extends LuceneTestCase {
-  private IndexSearcher searcher;
-  private BoostingSimilarity similarity = new BoostingSimilarity();
-  private byte[] payloadField = new byte[]{1};
-  private byte[] payloadMultiField1 = new byte[]{2};
-  private byte[] payloadMultiField2 = new byte[]{4};
-
-  public TestBoostingTermQuery(String s) {
-    super(s);
-  }
-
-  private class PayloadAnalyzer extends Analyzer {
-
-
-    public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(reader);
-      result = new PayloadFilter(result, fieldName);
-      return result;
-    }
-  }
-
-  private class PayloadFilter extends TokenFilter {
-    String fieldName;
-    int numSeen = 0;
-
-    PayloadAttribute payloadAtt;
-
-    public PayloadFilter(TokenStream input, String fieldName) {
-      super(input);
-      this.fieldName = fieldName;
-      payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
-    }
-
-    public boolean incrementToken() throws IOException {
-      boolean hasNext = input.incrementToken();
-      if (hasNext) {
-        if (fieldName.equals("field")) {
-          payloadAtt.setPayload(new Payload(payloadField));
-        } else if (fieldName.equals("multiField")) {
-          if (numSeen % 2 == 0) {
-            payloadAtt.setPayload(new Payload(payloadMultiField1));
-          } else {
-            payloadAtt.setPayload(new Payload(payloadMultiField2));
-          }
-          numSeen++;
-        }
-        return true;
-      } else {
-        return false;
-      }
-    }
-  }
-
-  protected void setUp() throws Exception {
-    super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    PayloadAnalyzer analyzer = new PayloadAnalyzer();
-    IndexWriter writer
-            = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setSimilarity(similarity);
-    //writer.infoStream = System.out;
-    for (int i = 0; i < 1000; i++) {
-      Document doc = new Document();
-      Field noPayloadField = new Field(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED);
-      //noPayloadField.setBoost(0);
-      doc.add(noPayloadField);
-      doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
-      writer.addDocument(doc);
-    }
-    writer.optimize();
-    writer.close();
-
-    searcher = new IndexSearcher(directory);
-    searcher.setSimilarity(similarity);
-  }
-
-  public void test() throws IOException {
-    BoostingTermQuery query = new BoostingTermQuery(new Term("field", "seventy"));
-    TopDocs hits = searcher.search(query, null, 100);
-    assertTrue("hits is null and it shouldn't be", hits != null);
-    assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
-
-    //they should all have the exact same score, because they all contain seventy once, and we set
-    //all the other similarity factors to be 1
-
-    assertTrue(hits.getMaxScore() + " does not equal: " + 1, hits.getMaxScore() == 1);
-    for (int i = 0; i < hits.scoreDocs.length; i++) {
-      ScoreDoc doc = hits.scoreDocs[i];
-      assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
-    }
-    CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
-    Spans spans = query.getSpans(searcher.getIndexReader());
-    assertTrue("spans is null and it shouldn't be", spans != null);
-    assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
-    /*float score = hits.score(0);
-    for (int i =1; i < hits.length(); i++)
-    {
-      assertTrue("scores are not equal and they should be", score == hits.score(i));
-    }*/
-
-  }
-
-  public void testMultipleMatchesPerDoc() throws Exception {
-    BoostingTermQuery query = new BoostingTermQuery(new Term(PayloadHelper.MULTI_FIELD, "seventy"));
-    TopDocs hits = searcher.search(query, null, 100);
-    assertTrue("hits is null and it shouldn't be", hits != null);
-    assertTrue("hits Size: " + hits.totalHits + " is not: " + 100, hits.totalHits == 100);
-
-    //they should all have the exact same score, because they all contain seventy once, and we set
-    //all the other similarity factors to be 1
-
-    //System.out.println("Hash: " + seventyHash + " Twice Hash: " + 2*seventyHash);
-    assertTrue(hits.getMaxScore() + " does not equal: " + 3, hits.getMaxScore() == 3);
-    //there should be exactly 10 items that score a 3, all the rest should score a 2
-    //The 10 items are: 70 + i*100 where i in [0-9]
-    int numTens = 0;
-    for (int i = 0; i < hits.scoreDocs.length; i++) {
-      ScoreDoc doc = hits.scoreDocs[i];
-      if (doc.doc % 10 == 0) {
-        numTens++;
-        assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
-      } else {
-        assertTrue(doc.score + " does not equal: " + 2, doc.score == 2);
-      }
-    }
-    assertTrue(numTens + " does not equal: " + 10, numTens == 10);
-    CheckHits.checkExplanations(query, "field", searcher, true);
-    Spans spans = query.getSpans(searcher.getIndexReader());
-    assertTrue("spans is null and it shouldn't be", spans != null);
-    assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
-    //should be two matches per document
-    int count = 0;
-    //100 hits times 2 matches per hit, we should have 200 in count
-    while (spans.next()) {
-      count++;
-    }
-    assertTrue(count + " does not equal: " + 200, count == 200);
-  }
-
-  public void testNoMatch() throws Exception {
-    BoostingTermQuery query = new BoostingTermQuery(new Term(PayloadHelper.FIELD, "junk"));
-    TopDocs hits = searcher.search(query, null, 100);
-    assertTrue("hits is null and it shouldn't be", hits != null);
-    assertTrue("hits Size: " + hits.totalHits + " is not: " + 0, hits.totalHits == 0);
-
-  }
-
-  public void testNoPayload() throws Exception {
-    BoostingTermQuery q1 = new BoostingTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "zero"));
-    BoostingTermQuery q2 = new BoostingTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "foo"));
-    BooleanClause c1 = new BooleanClause(q1, BooleanClause.Occur.MUST);
-    BooleanClause c2 = new BooleanClause(q2,
-            BooleanClause.Occur.MUST_NOT);
-    BooleanQuery query = new BooleanQuery();
-    query.add(c1);
-    query.add(c2);
-    TopDocs hits = searcher.search(query, null, 100);
-    assertTrue("hits is null and it shouldn't be", hits != null);
-    assertTrue("hits Size: " + hits.totalHits + " is not: " + 1, hits.totalHits == 1);
-    int[] results = new int[1];
-    results[0] = 0;//hits.scoreDocs[0].doc;
-    CheckHits.checkHitCollector(query, PayloadHelper.NO_PAYLOAD_FIELD, searcher, results);
-  }
-
-  // must be static for weight serialization tests
-  static class BoostingSimilarity extends DefaultSimilarity {
-    // TODO: Remove warning after API has been finalized
-    public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) {
-      //we know it is size 4 here, so ignore the offset/length
-      return payload[0];
-    }
-
-
-    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-    //Make everything else 1 so we see the effect of the payload
-    //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-    public float lengthNorm(String fieldName, int numTerms) {
-      return 1;
-    }
-
-    public float queryNorm(float sumOfSquaredWeights) {
-      return 1;
-    }
-
-    public float sloppyFreq(int distance) {
-      return 1;
-    }
-
-    public float coord(int overlap, int maxOverlap) {
-      return 1;
-    }
-
-    public float idf(int docFreq, int numDocs) {
-      return 1;
-    }
-
-    public float tf(float freq) {
-      return freq == 0 ? 0 : 1;
-    }
-  }
-}
diff --git a/src/test/org/apache/lucene/search/spans/TestBasics.java b/src/test/org/apache/lucene/search/spans/TestBasics.java
index 1524c62fafb..3c6c0d38791 100644
--- a/src/test/org/apache/lucene/search/spans/TestBasics.java
+++ b/src/test/org/apache/lucene/search/spans/TestBasics.java
@@ -65,7 +65,7 @@ public class TestBasics extends LuceneTestCase {
 
     writer.close();
 
-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);
   }
 
   public void testTerm() throws Exception {
diff --git a/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
index 467b69c1c63..f7fea8308cf 100644
--- a/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
+++ b/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
@@ -117,7 +117,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
                                            field("last", "jones") }));
     writer.close();
-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);
   }
 
   public void tearDown() throws Exception {
diff --git a/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index e296aa34fa9..e056e717575 100644
--- a/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -53,7 +53,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
       writer.addDocument(doc);
     }
     writer.close();
-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);
   }
 
   protected String[] docFields = {
diff --git a/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
index d2f76118d84..0369819f7c8 100644
--- a/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
+++ b/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
@@ -125,7 +125,7 @@ public class TestPayloadSpans extends LuceneTestCase {
 
     writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(directory);
+    IndexSearcher searcher = new IndexSearcher(directory, true);
     searcher.setSimilarity(similarity);
 
     return searcher;
@@ -261,7 +261,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     writer.addDocument(doc);
 
     writer.close();
 
-    IndexSearcher is = new IndexSearcher(directory);
+    IndexSearcher is = new IndexSearcher(directory, true);
 
     SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
     SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
@@ -295,7 +295,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     writer.addDocument(doc);
 
     writer.close();
 
-    IndexSearcher is = new IndexSearcher(directory);
+    IndexSearcher is = new IndexSearcher(directory, true);
 
     SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
     SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
@@ -329,7 +329,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     writer.addDocument(doc);
 
     writer.close();
 
-    IndexSearcher is = new IndexSearcher(directory);
+    IndexSearcher is = new IndexSearcher(directory, true);
 
     SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
     SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
@@ -370,7 +370,7 @@ public class TestPayloadSpans extends LuceneTestCase {
 
     writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(directory);
+    IndexSearcher searcher = new IndexSearcher(directory, true);
 
     IndexReader reader = searcher.getIndexReader();
     PayloadSpanUtil psu = new PayloadSpanUtil(reader);
@@ -439,7 +439,7 @@ public class TestPayloadSpans extends LuceneTestCase {
 
     writer.close();
 
-    IndexSearcher searcher = new IndexSearcher(directory);
+    IndexSearcher searcher = new IndexSearcher(directory, true);
 
     return searcher;
   }
diff --git a/src/test/org/apache/lucene/search/spans/TestSpans.java b/src/test/org/apache/lucene/search/spans/TestSpans.java
index 8bb6397c9c9..9e8d57e9db3 100644
--- a/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -56,7 +56,7 @@ public class TestSpans extends LuceneTestCase {
       writer.addDocument(doc);
     }
     writer.close();
-    searcher = new IndexSearcher(directory);
+    searcher = new IndexSearcher(directory, true);
   }
 
   private String[] docFields = {
@@ -459,7 +459,7 @@ public class TestSpans extends LuceneTestCase {
     writer.close();
 
     // Get searcher
-    final IndexReader reader = IndexReader.open(dir);
+    final IndexReader reader = IndexReader.open(dir, true);
     final IndexSearcher searcher = new IndexSearcher(reader);
 
     // Control (make sure docs indexed)
diff --git a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
index 904448e695c..afa3667ff33 100644
--- a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
+++ b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -61,7 +61,7 @@ public class TestSpansAdvanced extends LuceneTestCase {
     addDocument(writer, "3", "I think it should work.");
     addDocument(writer, "4", "I think it should work.");
     writer.close();
-    searcher = new IndexSearcher(mDirectory);
+    searcher = new IndexSearcher(mDirectory, true);
   }
 
   protected void tearDown() throws Exception {
diff --git a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
index ba12956b258..212652473fb 100644
--- a/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
+++ b/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
@@ -47,7 +47,7 @@ public class TestSpansAdvanced2 extends TestSpansAdvanced {
     writer.close();
 
     // re-open the searcher since we added more docs
-    searcher2 = new IndexSearcher(mDirectory);
+    searcher2 = new IndexSearcher(mDirectory, true);
   }
 
   /**
@@ -56,7 +56,7 @@ public class TestSpansAdvanced2 extends TestSpansAdvanced {
    * @throws Exception
    */
   public void testVerifyIndex() throws Exception {
-    final IndexReader reader = IndexReader.open(mDirectory);
+    final IndexReader reader = IndexReader.open(mDirectory, true);
     assertEquals(8, reader.numDocs());
     reader.close();
   }
diff --git a/src/test/org/apache/lucene/store/MockRAMDirectory.java b/src/test/org/apache/lucene/store/MockRAMDirectory.java
index 6d87be05d44..a9e3ddd99a8 100644
--- a/src/test/org/apache/lucene/store/MockRAMDirectory.java
+++ b/src/test/org/apache/lucene/store/MockRAMDirectory.java
@@ -67,18 +67,10 @@ public class MockRAMDirectory extends RAMDirectory {
     super();
     init();
   }
-  public MockRAMDirectory(String dir) throws IOException {
-    super(dir);
-    init();
-  }
   public MockRAMDirectory(Directory dir) throws IOException {
     super(dir);
     init();
   }
-  public MockRAMDirectory(File dir) throws IOException {
-    super(dir);
-    init();
-  }
 
   /** If set to true, we throw an IOException if the same
    *  file is opened by createOutput, ever. */
diff --git a/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
index 79022c53031..a8e1beb1551 100755
--- a/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
+++ b/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
@@ -256,7 +256,7 @@ public class TestBufferedIndexInput extends LuceneTestCase {
 
       dir.allIndexInputs.clear();
 
-      IndexReader reader = IndexReader.open(dir);
+      IndexReader reader = IndexReader.open(dir, false);
       Term aaa = new Term("content", "aaa");
       Term bbb = new Term("content", "bbb");
      Term ccc = new Term("content", "ccc");
diff --git a/src/test/org/apache/lucene/store/TestLockFactory.java b/src/test/org/apache/lucene/store/TestLockFactory.java
index a0206def9fc..a29fae1c7fb 100755
--- a/src/test/org/apache/lucene/store/TestLockFactory.java
+++ b/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -297,38 +297,6 @@ public class TestLockFactory extends LuceneTestCase {
         _TestUtil.rmDir(indexDirName);
     }
 
-    // Verify: if I try to getDirectory() with two different locking implementations, I get an IOException
-    public void testFSDirectoryDifferentLockFactory() throws IOException {
-        File indexDirName = _TestUtil.getTempDir("index.TestLockFactory5");
-
-        LockFactory lf = new SingleInstanceLockFactory();
-        FSDirectory fs1 = FSDirectory.getDirectory(indexDirName, lf);
-
-        // Different lock factory instance should hit IOException:
-        try {
-            FSDirectory.getDirectory(indexDirName, new SingleInstanceLockFactory());
-            fail("Should have hit an IOException because LockFactory instances differ");
-        } catch (IOException e) {
-        }
-
-        FSDirectory fs2 = null;
-
-        // Same lock factory instance should not:
-        try {
-            fs2 = FSDirectory.getDirectory(indexDirName, lf);
-        } catch (IOException e) {
-            e.printStackTrace(System.out);
-            fail("Should not have hit an IOException because LockFactory instances are the same");
-        }
-
-        fs1.close();
-        if (fs2 != null) {
-            fs2.close();
-        }
-        // Cleanup
-        _TestUtil.rmDir(indexDirName);
-    }
-
     // Verify: do stress test, by opening IndexReaders and
     // IndexWriters over & over in 2 threads and making sure
     // no unexpected exceptions are raised:
@@ -499,7 +467,7 @@ public class TestLockFactory extends LuceneTestCase {
 
         Query query = new TermQuery(new Term("content", "aaa"));
 
        for(int i=0;i