From 7641c23cefccfa957186f3dbd2056efe3e5ebf50 Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Wed, 10 Mar 2010 17:53:46 +0000 Subject: [PATCH] LUCENE-2294: cutover to IndexWriterConfig object for settings to IW git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@921480 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES.txt | 9 + .../query/QueryAutoStopWordAnalyzerTest.java | 3 +- .../shingle/ShingleAnalyzerWrapperTest.java | 3 +- .../java/org/apache/lucene/ant/IndexTask.java | 15 +- .../byTask/tasks/CreateIndexTask.java | 12 +- .../benchmark/byTask/tasks/OpenIndexTask.java | 15 +- .../benchmark/byTask/TestPerfTasksLogic.java | 29 +- .../vectorhighlight/FieldTermStack.java | 6 +- .../vectorhighlight/AbstractTestCase.java | 11 +- .../SimpleFragmentsBuilderTest.java | 7 +- .../highlight/HighlighterPhraseTest.java | 13 +- .../search/highlight/HighlighterTest.java | 82 +-- .../store/instantiated/TestEmptyIndex.java | 15 +- .../store/instantiated/TestIndicesEquals.java | 9 +- .../store/instantiated/TestSerialization.java | 4 +- .../TestUnoptimizedReaderOnConstructor.java | 9 +- .../lucli/src/java/lucli/LuceneMethods.java | 6 +- .../lucene/index/memory/MemoryIndexTest.java | 3 +- .../lucene/index/MultiPassIndexSplitter.java | 7 +- .../apache/lucene/misc/IndexMergeTool.java | 7 +- .../lucene/index/TestFieldNormModifier.java | 5 +- .../lucene/index/TestIndexSplitter.java | 5 +- .../index/TestMultiPassIndexSplitter.java | 5 +- .../lucene/index/TestTermVectorAccessor.java | 2 +- .../apache/lucene/misc/ChainedFilterTest.java | 11 +- .../lucene/misc/TestLengthNormModifier.java | 5 +- .../complexPhrase/TestComplexPhraseQuery.java | 4 +- .../lucene/search/BooleanFilterTest.java | 4 +- .../lucene/search/DuplicateFilterTest.java | 3 +- .../lucene/search/FuzzyLikeThisQueryTest.java | 5 +- .../apache/lucene/search/TermsFilterTest.java | 8 +- .../search/similar/TestMoreLikeThis.java | 5 +- .../standard/TestMultiFieldQPHelper.java | 4 +- 
.../TestMultiFieldQueryParserWrapper.java | 6 +- .../queryParser/standard/TestQPHelper.java | 9 +- .../standard/TestQueryParserWrapper.java | 3 + .../lucene/search/regex/TestRegexQuery.java | 5 +- .../search/regex/TestSpanRegexQuery.java | 14 +- .../TestRemoteCachingWrapperFilter.java | 6 +- .../lucene/search/TestRemoteSearchable.java | 4 +- .../apache/lucene/search/TestRemoteSort.java | 9 +- .../lucene/spatial/tier/TestCartesian.java | 4 +- .../lucene/spatial/tier/TestDistance.java | 4 +- .../lucene/search/spell/SpellChecker.java | 17 +- .../search/spell/TestLuceneDictionary.java | 4 +- .../lucene/search/spell/TestSpellChecker.java | 4 +- .../surround/query/SingleFieldTestDb.java | 8 +- .../lucene/swing/models/ListSearcher.java | 5 +- .../lucene/swing/models/TableSearcher.java | 5 +- .../org/apache/lucene/wordnet/Syns2Index.java | 9 +- .../apache/lucene/xmlparser/TestParser.java | 4 +- .../xmlparser/TestQueryTemplateManager.java | 3 +- .../org/apache/lucene/demo/IndexFiles.java | 7 +- .../org/apache/lucene/demo/IndexHTML.java | 7 +- .../apache/lucene/index/DirectoryReader.java | 2 +- .../apache/lucene/index/DocumentsWriter.java | 36 +- .../org/apache/lucene/index/IndexWriter.java | 304 ++++++--- .../apache/lucene/index/SegmentMerger.java | 4 +- src/test/org/apache/lucene/TestDemo.java | 6 +- .../lucene/TestMergeSchedulerExternal.java | 13 +- src/test/org/apache/lucene/TestSearch.java | 12 +- .../lucene/TestSearchForDuplicates.java | 9 +- .../lucene/TestSnapshotDeletionPolicy.java | 24 +- .../analysis/TestCachingTokenFilter.java | 3 +- .../lucene/analysis/TestKeywordAnalyzer.java | 9 +- .../lucene/collation/CollationTestBase.java | 18 +- .../lucene/document/TestBinaryDocument.java | 13 +- .../apache/lucene/document/TestDocument.java | 12 +- .../org/apache/lucene/index/DocHelper.java | 7 +- .../index/TestAddIndexesNoOptimize.java | 140 +++-- .../apache/lucene/index/TestAtomicUpdate.java | 18 +- .../index/TestBackwardsCompatibility.java | 28 +- 
.../apache/lucene/index/TestCheckIndex.java | 5 +- .../index/TestConcurrentMergeScheduler.java | 36 +- .../org/apache/lucene/index/TestCrash.java | 9 +- .../lucene/index/TestDeletionPolicy.java | 127 ++-- .../lucene/index/TestDirectoryReader.java | 6 +- src/test/org/apache/lucene/index/TestDoc.java | 7 +- .../lucene/index/TestDocumentWriter.java | 20 +- .../apache/lucene/index/TestFieldsReader.java | 41 +- .../lucene/index/TestFilterIndexReader.java | 4 +- .../lucene/index/TestIndexFileDeleter.java | 15 +- .../apache/lucene/index/TestIndexReader.java | 188 +++--- .../lucene/index/TestIndexReaderClone.java | 8 +- .../index/TestIndexReaderCloneNorms.java | 41 +- .../lucene/index/TestIndexReaderReopen.java | 26 +- .../apache/lucene/index/TestIndexWriter.java | 590 +++++++++--------- .../lucene/index/TestIndexWriterDelete.java | 84 ++- .../index/TestIndexWriterExceptions.java | 18 +- .../index/TestIndexWriterLockRelease.java | 5 +- .../index/TestIndexWriterMergePolicy.java | 69 +- .../lucene/index/TestIndexWriterMerging.java | 16 +- .../lucene/index/TestIndexWriterReader.java | 73 +-- .../org/apache/lucene/index/TestLazyBug.java | 27 +- .../lucene/index/TestLazyProxSkipping.java | 9 +- .../lucene/index/TestMultiLevelSkipList.java | 3 +- .../index/TestNRTReaderWithThreads.java | 10 +- .../org/apache/lucene/index/TestNorms.java | 38 +- .../org/apache/lucene/index/TestOmitTf.java | 32 +- .../lucene/index/TestParallelReader.java | 17 +- .../index/TestParallelReaderEmptyIndex.java | 21 +- .../lucene/index/TestParallelTermEnum.java | 5 +- .../org/apache/lucene/index/TestPayloads.java | 16 +- .../lucene/index/TestSegmentTermDocs.java | 4 +- .../lucene/index/TestSegmentTermEnum.java | 15 +- .../lucene/index/TestStressIndexing.java | 11 +- .../lucene/index/TestStressIndexing2.java | 66 +- .../lucene/index/TestTermVectorsReader.java | 5 +- .../apache/lucene/index/TestTermdocPerf.java | 8 +- .../lucene/index/TestThreadedOptimize.java | 18 +- 
.../lucene/index/TestTransactionRollback.java | 14 +- .../apache/lucene/index/TestTransactions.java | 31 +- .../TestMultiFieldQueryParser.java | 3 +- .../lucene/queryParser/TestQueryParser.java | 8 +- .../lucene/search/BaseTestRangeFilter.java | 8 +- .../org/apache/lucene/search/QueryUtils.java | 6 +- .../apache/lucene/search/TestBoolean2.java | 7 +- .../search/TestBooleanMinShouldMatch.java | 6 +- .../apache/lucene/search/TestBooleanOr.java | 4 +- .../lucene/search/TestBooleanPrefixQuery.java | 4 +- .../lucene/search/TestBooleanQuery.java | 4 +- .../lucene/search/TestBooleanScorer.java | 4 +- .../search/TestCachingWrapperFilter.java | 6 +- .../lucene/search/TestCustomSearcherSort.java | 3 +- .../apache/lucene/search/TestDateFilter.java | 6 +- .../apache/lucene/search/TestDateSort.java | 4 +- .../search/TestDisjunctionMaxQuery.java | 7 +- .../apache/lucene/search/TestDocBoost.java | 4 +- .../apache/lucene/search/TestDocIdSet.java | 5 +- .../search/TestElevationComparator.java | 6 +- .../lucene/search/TestExplanations.java | 4 +- .../apache/lucene/search/TestFieldCache.java | 4 +- .../search/TestFieldCacheRangeFilter.java | 4 +- .../search/TestFieldCacheTermsFilter.java | 4 +- .../lucene/search/TestFilteredQuery.java | 4 +- .../lucene/search/TestFilteredSearch.java | 8 +- .../apache/lucene/search/TestFuzzyQuery.java | 14 +- .../lucene/search/TestMatchAllDocsQuery.java | 5 +- .../lucene/search/TestMultiPhraseQuery.java | 10 +- .../lucene/search/TestMultiSearcher.java | 24 +- .../search/TestMultiSearcherRanking.java | 10 +- .../search/TestMultiTermConstantScore.java | 12 +- .../search/TestMultiThreadTermVectors.java | 5 +- .../TestMultiValuedNumericRangeQuery.java | 5 +- .../org/apache/lucene/search/TestNot.java | 5 +- .../search/TestNumericRangeQuery32.java | 9 +- .../search/TestNumericRangeQuery64.java | 9 +- .../lucene/search/TestPhrasePrefixQuery.java | 4 +- .../apache/lucene/search/TestPhraseQuery.java | 17 +- .../lucene/search/TestPositionIncrement.java | 9 +- 
.../lucene/search/TestPrefixFilter.java | 4 +- .../search/TestPrefixInBooleanQuery.java | 6 +- .../apache/lucene/search/TestPrefixQuery.java | 4 +- .../lucene/search/TestQueryWrapperFilter.java | 5 +- .../apache/lucene/search/TestScorerPerf.java | 7 +- .../org/apache/lucene/search/TestSetNorm.java | 4 +- .../apache/lucene/search/TestSimilarity.java | 7 +- .../lucene/search/TestSimpleExplanations.java | 9 +- .../lucene/search/TestSloppyPhraseQuery.java | 6 +- .../org/apache/lucene/search/TestSort.java | 19 +- .../lucene/search/TestSpanQueryFilter.java | 5 +- .../lucene/search/TestTermRangeFilter.java | 14 +- .../lucene/search/TestTermRangeQuery.java | 6 +- .../apache/lucene/search/TestTermScorer.java | 8 +- .../apache/lucene/search/TestTermVectors.java | 26 +- .../apache/lucene/search/TestThreadSafe.java | 7 +- .../search/TestTimeLimitingCollector.java | 4 +- .../lucene/search/TestTopDocsCollector.java | 5 +- .../search/TestTopScoreDocCollector.java | 4 +- .../apache/lucene/search/TestWildcard.java | 5 +- .../search/function/FunctionTestSetup.java | 4 +- .../lucene/search/payloads/PayloadHelper.java | 5 +- .../search/payloads/TestPayloadNearQuery.java | 7 +- .../search/payloads/TestPayloadTermQuery.java | 8 +- .../lucene/search/spans/TestBasics.java | 6 +- .../spans/TestFieldMaskingSpanQuery.java | 6 +- .../search/spans/TestNearSpansOrdered.java | 3 +- .../lucene/search/spans/TestPayloadSpans.java | 31 +- .../apache/lucene/search/spans/TestSpans.java | 8 +- .../search/spans/TestSpansAdvanced.java | 5 +- .../search/spans/TestSpansAdvanced2.java | 7 +- .../lucene/store/TestBufferedIndexInput.java | 10 +- .../lucene/store/TestFileSwitchDirectory.java | 9 +- .../apache/lucene/store/TestLockFactory.java | 25 +- .../apache/lucene/store/TestRAMDirectory.java | 10 +- .../apache/lucene/store/TestWindowsMMap.java | 6 +- .../util/TestFieldCacheSanityChecker.java | 8 +- .../org/apache/lucene/util/_TestUtil.java | 2 +- 188 files changed, 1796 insertions(+), 1644 deletions(-) 
diff --git a/CHANGES.txt b/CHANGES.txt index 454fe01d389..890e15fd248 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -76,6 +76,15 @@ API Changes use by external code. In addition it offers a matchExtension method which callers can use to query whether a certain file matches a certain extension. (Shai Erera via Mike McCandless) + +* LUCENE-2294: IndexWriter constructors have been deprecated in favor of a + single ctor which accepts IndexWriterConfig and a Directory. You can set all + the parameters related to IndexWriter on IndexWriterConfig. The different + setter/getter methods were deprecated as well. One should call + writer.getConfig().getXYZ() to query for a parameter XYZ. + Additionally, the setter/getter related to MergePolicy were deprecated as + well. One should interact with the MergePolicy directly. + (Shai Erera via Mike McCandless) * LUCENE-124: Add a TopTermsBoostOnlyBooleanQueryRewrite to MultiTermQuery. This rewrite method is similar to TopTermsScoringBooleanQueryRewrite, but diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java index f6cad045a16..c0e65c1b3a9 100644 --- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java +++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java @@ -31,6 +31,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.queryParser.QueryParser; @@ -51,7 +52,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase { super.setUp(); dir = new RAMDirectory(); 
appAnalyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(dir, appAnalyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(appAnalyzer)); int numDocs = 200; for (int i = 0; i < numDocs; i++) { Document doc = new Document(); diff --git a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java index 234f9b7f7f9..848c8beec6b 100644 --- a/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java +++ b/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java @@ -31,6 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.BooleanClause; @@ -59,7 +60,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { */ public IndexSearcher setUpSearcher(Analyzer analyzer) throws Exception { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc; doc = new Document(); diff --git a/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java b/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java index 6cd2187b9e5..56d33091479 100644 --- a/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java +++ b/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java @@ -38,7 +38,10 @@ 
import org.apache.lucene.document.DateTools; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Searcher; @@ -280,15 +283,17 @@ public class IndexTask extends Task { log("checkLastModified = " + checkLastModified, Project.MSG_VERBOSE); - IndexWriter writer = - new IndexWriter(dir, analyzer, create, IndexWriter.MaxFieldLength.LIMITED); - - writer.setUseCompoundFile(useCompoundIndex); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + Version.LUCENE_CURRENT).setAnalyzer(analyzer).setOpenMode( + create ? OpenMode.CREATE : OpenMode.APPEND)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundIndex); + lmp.setUseCompoundDocStore(useCompoundIndex); + lmp.setMergeFactor(mergeFactor); int totalFiles = 0; int totalIndexed = 0; int totalIgnored = 0; try { - writer.setMergeFactor(mergeFactor); for (int i = 0; i < rcs.size(); i++) { ResourceCollection rc = rcs.elementAt(i); diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java index 91c0feb1436..156dec8292e 100644 --- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java +++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java @@ -21,9 +21,12 @@ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.IndexWriter; +import 
org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; +import org.apache.lucene.util.Version; import java.io.BufferedOutputStream; import java.io.File; @@ -99,7 +102,7 @@ public class CreateIndexTask extends PerfTask { final double ramBuffer = config.get("ram.flush.mb",OpenIndexTask.DEFAULT_RAM_FLUSH_MB); final int maxBuffered = config.get("max.buffered",OpenIndexTask.DEFAULT_MAX_BUFFERED); - if (maxBuffered == IndexWriter.DISABLE_AUTO_FLUSH) { + if (maxBuffered == IndexWriterConfig.DISABLE_AUTO_FLUSH) { writer.setRAMBufferSizeMB(ramBuffer); writer.setMaxBufferedDocs(maxBuffered); } else { @@ -147,10 +150,9 @@ public class CreateIndexTask extends PerfTask { Config config = runData.getConfig(); IndexWriter writer = new IndexWriter(runData.getDirectory(), - runData.getAnalyzer(), - true, - getIndexDeletionPolicy(config), - IndexWriter.MaxFieldLength.LIMITED); + new IndexWriterConfig(Version.LUCENE_31).setAnalyzer( + runData.getAnalyzer()).setOpenMode(OpenMode.CREATE) + .setIndexDeletionPolicy(getIndexDeletionPolicy(config))); setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1; diff --git a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java index c82cbba24dc..8ea76aabb93 100644 --- a/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java +++ b/contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java @@ -21,7 +21,9 @@ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriterConfig; 
import org.apache.lucene.index.LogMergePolicy; +import org.apache.lucene.util.Version; import java.io.IOException; @@ -39,10 +41,10 @@ import java.io.IOException; */ public class OpenIndexTask extends PerfTask { - public static final int DEFAULT_MAX_BUFFERED = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS; - public static final int DEFAULT_MAX_FIELD_LENGTH = IndexWriter.DEFAULT_MAX_FIELD_LENGTH; + public static final int DEFAULT_MAX_BUFFERED = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS; + public static final int DEFAULT_MAX_FIELD_LENGTH = IndexWriterConfig.UNLIMITED_FIELD_LENGTH; public static final int DEFAULT_MERGE_PFACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR; - public static final double DEFAULT_RAM_FLUSH_MB = (int) IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB; + public static final double DEFAULT_RAM_FLUSH_MB = (int) IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB; private String commitUserData; public OpenIndexTask(PerfRunData runData) { @@ -61,10 +63,9 @@ public class OpenIndexTask extends PerfTask { } IndexWriter writer = new IndexWriter(runData.getDirectory(), - runData.getAnalyzer(), - CreateIndexTask.getIndexDeletionPolicy(config), - IndexWriter.MaxFieldLength.UNLIMITED, - ic); + new IndexWriterConfig(Version.LUCENE_CURRENT).setAnalyzer( + runData.getAnalyzer()).setIndexDeletionPolicy( + CreateIndexTask.getIndexDeletionPolicy(config)).setIndexCommit(ic)); CreateIndexTask.setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1; diff --git a/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java index 01c0623ac60..23860c7fd1f 100755 --- a/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java +++ b/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java @@ -36,12 +36,15 @@ import org.apache.lucene.benchmark.byTask.stats.TaskStats; import 
org.apache.lucene.collation.CollationKeyAnalyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; import org.apache.lucene.index.TermDocs; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.index.TermFreqVector; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.search.FieldCache.StringIndex; import org.apache.lucene.search.FieldCache; @@ -96,7 +99,9 @@ public class TestPerfTasksLogic extends LuceneTestCase { assertEquals("TestSearchTask was supposed to be called!",279,CountingSearchTestTask.numSearches); assertTrue("Index does not exist?...!", IndexReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), + new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true); assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs()); @@ -182,7 +187,7 @@ public class TestPerfTasksLogic extends LuceneTestCase { assertTrue("Index does not exist?...!", IndexReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. 
- IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true); assertEquals("100 docs were added to the index, this is what we expect to find!",100,ir.numDocs()); @@ -221,7 +226,7 @@ public class TestPerfTasksLogic extends LuceneTestCase { assertTrue("Index does not exist?...!", IndexReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true); assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs()); @@ -294,7 +299,7 @@ public class TestPerfTasksLogic extends LuceneTestCase { assertEquals("TestSearchTask was supposed to be called!",139,CountingSearchTestTask.numSearches); assertTrue("Index does not exist?...!", IndexReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. 
- IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true); assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs()); @@ -417,7 +422,9 @@ public class TestPerfTasksLogic extends LuceneTestCase { benchmark = execBenchmark(algLines2); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(),null,false,IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), + new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory(), true); @@ -655,7 +662,9 @@ public class TestPerfTasksLogic extends LuceneTestCase { // 2. execute the algorithm (required in every "logic" test) Benchmark benchmark = execBenchmark(algLines); - assertTrue("did not use the specified MergeScheduler", ((MyMergeScheduler) benchmark.getRunData().getIndexWriter().getMergeScheduler()).called); + assertTrue("did not use the specified MergeScheduler", + ((MyMergeScheduler) benchmark.getRunData().getIndexWriter().getConfig() + .getMergeScheduler()).called); benchmark.getRunData().getIndexWriter().close(); // 3. test number of docs in the index @@ -743,10 +752,10 @@ public class TestPerfTasksLogic extends LuceneTestCase { // 2. 
execute the algorithm (required in every "logic" test) Benchmark benchmark = execBenchmark(algLines); final IndexWriter writer = benchmark.getRunData().getIndexWriter(); - assertEquals(2, writer.getMaxBufferedDocs()); - assertEquals(IndexWriter.DISABLE_AUTO_FLUSH, (int) writer.getRAMBufferSizeMB()); - assertEquals(3, writer.getMergeFactor()); - assertFalse(writer.getUseCompoundFile()); + assertEquals(2, writer.getConfig().getMaxBufferedDocs()); + assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, (int) writer.getConfig().getRAMBufferSizeMB()); + assertEquals(3, ((LogMergePolicy) writer.getMergePolicy()).getMergeFactor()); + assertFalse(((LogMergePolicy) writer.getMergePolicy()).getUseCompoundFile()); writer.close(); Directory dir = benchmark.getRunData().getDirectory(); IndexReader reader = IndexReader.open(dir, true); diff --git a/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java index 289cfe154cd..e0ad1e62e94 100644 --- a/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java +++ b/contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java @@ -30,10 +30,10 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.index.TermPositionVector; import org.apache.lucene.index.TermVectorOffsetInfo; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -50,13 +50,13 @@ public class FieldTermStack { LinkedList termList = new LinkedList(); 
public static void main( String[] args ) throws Exception { - Analyzer analyzer = new WhitespaceAnalyzer(); + Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "f", analyzer ); Query query = parser.parse( "a x:b" ); FieldQuery fieldQuery = new FieldQuery( query, true, false ); Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter( dir, analyzer, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); doc.add( new Field( "f", "a a a b b c a b b c d e f", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); doc.add( new Field( "f", "b a b a f", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); diff --git a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java index 528c29dd3f0..57e661fba03 100644 --- a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java +++ b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java @@ -35,8 +35,9 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.PhraseQuery; @@ -326,7 +327,9 @@ public abstract class AbstractTestCase extends LuceneTestCase { // make 1 doc with multi 
valued field protected void make1dmfIndex( Analyzer analyzer, String... values ) throws Exception { - IndexWriter writer = new IndexWriter( dir, analyzer, true, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer) + .setOpenMode(OpenMode.CREATE)); Document doc = new Document(); for( String value: values ) doc.add( new Field( F, value, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); @@ -338,7 +341,9 @@ public abstract class AbstractTestCase extends LuceneTestCase { // make 1 doc with multi valued & not analyzed field protected void make1dmfIndexNA( String... values ) throws Exception { - IndexWriter writer = new IndexWriter( dir, analyzerK, true, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setAnalyzer( + analyzerK)); Document doc = new Document(); for( String value: values ) doc.add( new Field( F, value, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); diff --git a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java index ac0311afeba..479c90ac319 100644 --- a/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java +++ b/contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java @@ -24,7 +24,8 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import 
org.apache.lucene.search.Query; public class SimpleFragmentsBuilderTest extends AbstractTestCase { @@ -118,7 +119,9 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase { } protected void makeUnstoredIndex() throws Exception { - IndexWriter writer = new IndexWriter( dir, analyzerW, true, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setAnalyzer( + analyzerW)); Document doc = new Document(); doc.add( new Field( F, "aaa", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); writer.addDocument( doc ); diff --git a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java index 5113b19e952..67bea5b18df 100644 --- a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java +++ b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java @@ -21,7 +21,6 @@ import java.io.IOException; import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; @@ -33,9 +32,9 @@ import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermPositionVector; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import 
org.apache.lucene.search.PhraseQuery; @@ -59,7 +58,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final String TEXT = "the fox jumped"; final Directory directory = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(directory, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED); + new IndexWriterConfig(TEST_VERSION_CURRENT)); try { final Document document = new Document(); document.add(new Field(FIELD, new TokenStreamConcurrent(), @@ -102,7 +101,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final String TEXT = "the fox jumped"; final Directory directory = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(directory, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED); + new IndexWriterConfig(TEST_VERSION_CURRENT)); try { final Document document = new Document(); document.add(new Field(FIELD, new TokenStreamConcurrent(), @@ -171,7 +170,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final String TEXT = "the fox did not jump"; final Directory directory = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(directory, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED); + new IndexWriterConfig(TEST_VERSION_CURRENT)); try { final Document document = new Document(); document.add(new Field(FIELD, new TokenStreamSparse(), @@ -213,7 +212,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final String TEXT = "the fox did not jump"; final Directory directory = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(directory, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED); + new IndexWriterConfig(TEST_VERSION_CURRENT)); try { final Document document = new Document(); document.add(new Field(FIELD, TEXT, Store.YES, Index.ANALYZED, @@ -253,7 +252,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { final String TEXT = "the fox did not jump"; final 
Directory directory = new RAMDirectory(); final IndexWriter indexWriter = new IndexWriter(directory, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED); + new IndexWriterConfig(TEST_VERSION_CURRENT)); try { final Document document = new Document(); document.add(new Field(FIELD, new TokenStreamSparse(), diff --git a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java index 02d9375da66..6ca56165e8f 100644 --- a/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java +++ b/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java @@ -51,8 +51,9 @@ import org.apache.lucene.document.Field.Index; import org.apache.lucene.document.Field.Store; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.BooleanQuery; @@ -80,7 +81,6 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.Version; import org.w3c.dom.Element; import org.w3c.dom.NodeList; @@ -89,8 +89,6 @@ import org.w3c.dom.NodeList; * */ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter { - // TODO: change to CURRENT, does not work because posIncr: - static final Version TEST_VERSION = TEST_VERSION_CURRENT; private IndexReader reader; static final String FIELD_NAME = "contents"; @@ -99,7 +97,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase 
implements Formatte RAMDirectory ramDir; public IndexSearcher searcher = null; int numHighlights = 0; - final Analyzer analyzer = new StandardAnalyzer(TEST_VERSION); + final Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); TopDocs hits; String[] texts = { @@ -120,7 +118,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte public void testQueryScorerHits() throws Exception { Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT); - QueryParser qp = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer); query = qp.parse("\"very long\""); searcher = new IndexSearcher(ramDir, true); TopDocs hits = searcher.search(query, 10); @@ -150,7 +148,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte String s1 = "I call our world Flatland, not because we call it so,"; - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, new StandardAnalyzer(TEST_VERSION)); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT)); // Verify that a query against the default field results in text being // highlighted @@ -182,7 +180,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte */ private static String highlightField(Query query, String fieldName, String text) throws IOException, InvalidTokenOffsetsException { - TokenStream tokenStream = new StandardAnalyzer(TEST_VERSION).tokenStream(fieldName, new StringReader(text)); + TokenStream tokenStream = new StandardAnalyzer(TEST_VERSION_CURRENT).tokenStream(fieldName, new StringReader(text)); // Assuming "", "" used to highlight SimpleHTMLFormatter formatter = new SimpleHTMLFormatter(); QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME); @@ -228,7 +226,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte String q = "(" + f1c + ph1 + " 
OR " + f2c + ph1 + ") AND (" + f1c + ph2 + " OR " + f2c + ph2 + ")"; Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); - QueryParser qp = new QueryParser(TEST_VERSION, f1, analyzer); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, f1, analyzer); Query query = qp.parse(q); QueryScorer scorer = new QueryScorer(query, f1); @@ -678,7 +676,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte // Need to explicitly set the QueryParser property to use TermRangeQuery // rather // than RangeFilters - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer); parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); query = parser.parse(queryString); doSearching(query); @@ -1028,7 +1026,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte String srchkey = "football"; String s = "football-soccer in the euro 2004 footie competition"; - QueryParser parser = new QueryParser(TEST_VERSION, "bookid", analyzer); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "bookid", analyzer); Query query = parser.parse(srchkey); TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s)); @@ -1154,13 +1152,13 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte sb.append(stopWords.iterator().next()); } SimpleHTMLFormatter fm = new SimpleHTMLFormatter(); - Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(TEST_VERSION, stopWords).tokenStream( + Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords).tokenStream( "data", new StringReader(sb.toString())), fm);// new Highlighter(fm, // new // QueryTermScorer(query)); hg.setTextFragmenter(new NullFragmenter()); hg.setMaxDocCharsToAnalyze(100); - match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), 
"data", sb.toString()); + match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords), "data", sb.toString()); assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg .getMaxDocCharsToAnalyze()); @@ -1171,7 +1169,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte // + whitespace) sb.append(" "); sb.append(goodWord); - match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString()); + match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords), "data", sb.toString()); assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg .getMaxDocCharsToAnalyze()); } @@ -1192,11 +1190,11 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte String text = "this is a text with searchterm in it"; SimpleHTMLFormatter fm = new SimpleHTMLFormatter(); - Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(TEST_VERSION, + Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords).tokenStream("text", new StringReader(text)), fm); hg.setTextFragmenter(new NullFragmenter()); hg.setMaxDocCharsToAnalyze(36); - String match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "text", text); + String match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords), "text", text); assertTrue( "Matched text should contain remainder of text after highlighted query ", match.endsWith("in it")); @@ -1213,9 +1211,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte numHighlights = 0; // test to show how rewritten query can still be used searcher = new IndexSearcher(ramDir, true); - Analyzer analyzer = new StandardAnalyzer(TEST_VERSION); + Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer); 
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer); Query query = parser.parse("JF? or Kenned*"); System.out.println("Searching with primitive query"); // forget to set this and... @@ -1326,7 +1324,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte public void testMultiSearcher() throws Exception { // setup index 1 RAMDirectory ramDir1 = new RAMDirectory(); - IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer1 = new IndexWriter(ramDir1, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); Document d = new Document(); Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED); d.add(f); @@ -1337,7 +1337,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte // setup index 2 RAMDirectory ramDir2 = new RAMDirectory(); - IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer2 = new IndexWriter(ramDir2, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); d = new Document(); f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED); d.add(f); @@ -1350,7 +1352,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte searchers[0] = new IndexSearcher(ramDir1, true); searchers[1] = new IndexSearcher(ramDir2, true); MultiSearcher multiSearcher = new MultiSearcher(searchers); - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, new StandardAnalyzer(TEST_VERSION)); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT)); parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); query = parser.parse("multi*"); 
System.out.println("Searching for: " + query.toString(FIELD_NAME)); @@ -1384,7 +1386,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte @Override public void run() throws Exception { String docMainText = "fred is one of the people"; - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer); Query query = parser.parse("fred category:people"); // highlighting respects fieldnames used in query @@ -1530,64 +1532,64 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte Highlighter highlighter; String result; - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed"); + query = 
new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed"); highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); // ///////////////// same tests, just put the bigger overlapping token // first - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); 
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); - query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed"); + query = new QueryParser(TEST_VERSION_CURRENT, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed"); highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this); result = highlighter.getBestFragments(getTS2a(), s, 3, "..."); assertEquals("Hi-Speed10 foo", result); @@ -1613,7 +1615,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte } private void makeIndex() throws IOException { - IndexWriter writer = new IndexWriter( dir, a, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.addDocument( doc( "t_text1", "random 
words for highlighting tests del" ) ); writer.addDocument( doc( "t_text1", "more random words for second field del" ) ); writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) ); @@ -1623,7 +1625,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte } private void deleteDocument() throws IOException { - IndexWriter writer = new IndexWriter( dir, a, false, MaxFieldLength.LIMITED ); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.deleteDocuments( new Term( "t_text1", "del" ) ); // To see negative idf, keep comment the following line //writer.optimize(); @@ -1632,7 +1634,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte private void searchIndex() throws IOException, ParseException, InvalidTokenOffsetsException { String q = "t_text1:random"; - QueryParser parser = new QueryParser(TEST_VERSION, "t_text1", a ); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "t_text1", a ); Query query = parser.parse( q ); IndexSearcher searcher = new IndexSearcher( dir, true ); // This scorer can return negative idf -> null fragment @@ -1686,7 +1688,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte } public void doSearching(String queryString) throws Exception { - QueryParser parser = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer); + QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer); parser.setEnablePositionIncrements(true); parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); query = parser.parse(queryString); @@ -1725,7 +1727,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte protected void setUp() throws Exception { super.setUp(); ramDir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(TEST_VERSION), true, 
IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); for (int i = 0; i < texts.length; i++) { addDoc(writer, texts[i]); } diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java index a980f0d2941..34687cddc52 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java +++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java @@ -16,9 +16,12 @@ package org.apache.lucene.store.instantiated; -import junit.framework.TestCase; +import java.io.IOException; +import java.util.Arrays; + import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; import org.apache.lucene.search.IndexSearcher; @@ -26,11 +29,9 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.LuceneTestCase; -import java.util.Arrays; -import java.io.IOException; - -public class TestEmptyIndex extends TestCase { +public class TestEmptyIndex extends LuceneTestCase { public void testSearch() throws Exception { @@ -60,7 +61,7 @@ public class TestEmptyIndex extends TestCase { // make sure a Directory acts the same Directory d = new RAMDirectory(); - new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close(); + new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT)).close(); r = IndexReader.open(d, false); testNorms(r); r.close(); @@ -93,7 +94,7 @@ public class TestEmptyIndex extends TestCase { // make sure a Directory acts the same Directory 
d = new RAMDirectory(); - new IndexWriter(d, null, true, IndexWriter.MaxFieldLength.UNLIMITED).close(); + new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT)).close(); r = IndexReader.open(d, false); termEnumTest(r); r.close(); diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java index 8d8d7563645..6089082470c 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java +++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java @@ -30,6 +30,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Payload; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; @@ -60,7 +61,9 @@ public class TestIndicesEquals extends LuceneTestCase { RAMDirectory dir = new RAMDirectory(); // create dir data - IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); for (int i = 0; i < 20; i++) { Document document = new Document(); assembleDocument(document, i); @@ -84,7 +87,9 @@ public class TestIndicesEquals extends LuceneTestCase { InstantiatedIndex ii = new InstantiatedIndex(); // create dir data - IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); for (int i = 
0; i < 500; i++) { Document document = new Document(); assembleDocument(document, i); diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java index 2ee5dd2920e..be50ff88626 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java +++ b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java @@ -22,7 +22,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.analysis.WhitespaceAnalyzer; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -35,7 +35,7 @@ public class TestSerialization extends LuceneTestCase { Directory dir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("foo", "bar rab abr bra rba", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); doc.add(new Field("moo", "bar rab abr bra rba", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); diff --git a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java index 81f77d51420..8d837ebb2a5 100644 --- a/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java +++ 
b/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java @@ -18,10 +18,11 @@ package org.apache.lucene.store.instantiated; import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -32,17 +33,17 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase { public void test() throws Exception { Directory dir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); addDocument(iw, "Hello, world!"); addDocument(iw, "All work and no play makes jack a dull boy"); iw.close(); - iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); addDocument(iw, "Hello, tellus!"); addDocument(iw, "All work and no play makes danny a dull boy"); iw.close(); - iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); addDocument(iw, "Hello, earth!"); addDocument(iw, "All work and no play makes wendy a dull girl"); iw.close(); diff --git a/contrib/lucli/src/java/lucli/LuceneMethods.java b/contrib/lucli/src/java/lucli/LuceneMethods.java index 1e602139b29..a8abca0b056 100644 --- 
a/contrib/lucli/src/java/lucli/LuceneMethods.java +++ b/contrib/lucli/src/java/lucli/LuceneMethods.java @@ -42,9 +42,11 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; import org.apache.lucene.index.IndexReader.FieldOption; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.queryParser.MultiFieldQueryParser; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.search.Collector; @@ -169,7 +171,9 @@ class LuceneMethods { public void optimize() throws IOException { //open the index writer. False: don't create a new one - IndexWriter indexWriter = new IndexWriter(indexName, createAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter indexWriter = new IndexWriter(indexName, new IndexWriterConfig( + Version.LUCENE_CURRENT).setAnalyzer(createAnalyzer()).setOpenMode( + OpenMode.APPEND)); message("Starting to optimize index."); long start = System.currentTimeMillis(); indexWriter.optimize(); diff --git a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java index 12b60525796..6dbf63b7f64 100644 --- a/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java +++ b/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java @@ -42,6 +42,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.Collector; @@ 
-410,7 +411,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase { RAMDirectory dir = new RAMDirectory(); IndexWriter writer = null; try { - writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); writer.addDocument(doc); writer.optimize(); return dir; diff --git a/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java b/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java index 3df256f13d2..4604b1aa105 100644 --- a/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java +++ b/contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java @@ -21,11 +21,11 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.OpenBitSet; +import org.apache.lucene.util.Version; /** * This tool splits input index into multiple equal parts. 
The method employed @@ -88,8 +88,7 @@ public class MultiPassIndexSplitter { } } } - IndexWriter w = new IndexWriter(outputs[i], new WhitespaceAnalyzer(), - true, MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(outputs[i], new IndexWriterConfig(Version.LUCENE_CURRENT).setOpenMode(OpenMode.CREATE)); System.err.println("Writing part " + (i + 1) + " ..."); w.addIndexes(new IndexReader[]{input}); w.close(); diff --git a/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java b/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java index a7d3ef006c6..8297d7af4c0 100644 --- a/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java +++ b/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java @@ -17,9 +17,11 @@ package org.apache.lucene.misc; */ import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.analysis.SimpleAnalyzer; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.util.Version; import java.io.File; import java.io.IOException; @@ -36,7 +38,8 @@ public class IndexMergeTool { } FSDirectory mergedIndex = FSDirectory.open(new File(args[0])); - IndexWriter writer = new IndexWriter(mergedIndex, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(mergedIndex, new IndexWriterConfig( + Version.LUCENE_CURRENT).setOpenMode(OpenMode.CREATE)); Directory[] indexes = new Directory[args.length - 1]; for (int i = 1; i < args.length; i++) { diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java index 17ba8ff8c08..31bc366e39a 100644 --- a/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java +++ b/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java @@ 
-23,7 +23,6 @@ import java.util.Arrays; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.IndexSearcher; @@ -58,7 +57,9 @@ public class TestFieldNormModifier extends LuceneTestCase { @Override protected void setUp() throws Exception { super.setUp(); - IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(store, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new SimpleAnalyzer( + TEST_VERSION_CURRENT))); for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java index 0a96fc80f4f..f0c4a53c0b1 100644 --- a/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java +++ b/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java @@ -18,9 +18,8 @@ package org.apache.lucene.index; import java.io.File; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; @@ -35,7 +34,7 @@ public class TestIndexSplitter extends LuceneTestCase { _TestUtil.rmDir(destDir); destDir.mkdirs(); FSDirectory fsDir = FSDirectory.open(dir); - IndexWriter iw = new IndexWriter(fsDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(fsDir, new 
IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); for (int x=0; x < 100; x++) { Document doc = TestIndexWriterReader.createDocument(x, "index", 5); iw.addDocument(doc); diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java index 04a312a74a4..db04e81b789 100644 --- a/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java +++ b/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java @@ -16,10 +16,8 @@ package org.apache.lucene.index; * limitations under the License. */ -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; @@ -32,8 +30,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); RAMDirectory dir = new RAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc; for (int i = 0; i < NUM_DOCS; i++) { doc = new Document(); diff --git a/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java b/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java index e4e794a3bfb..51f89a11ded 100644 --- a/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java +++ b/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java @@ -28,7 +28,7 @@ public class TestTermVectorAccessor extends LuceneTestCase { public void test() throws Exception { Directory dir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(dir, 
new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()))); Document doc; diff --git a/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java b/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java index d6c6a8578fb..75466857870 100644 --- a/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java +++ b/contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java @@ -20,13 +20,11 @@ package org.apache.lucene.misc; import java.util.Calendar; import java.util.GregorianCalendar; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.CachingWrapperFilter; @@ -58,8 +56,7 @@ public class ChainedFilterTest extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = - new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); Calendar cal = new GregorianCalendar(); cal.clear(); @@ -187,9 +184,7 @@ public class ChainedFilterTest extends LuceneTestCase { public void testWithCachingFilter() throws Exception { Directory dir = new RAMDirectory(); - Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); - - IndexWriter writer = new 
IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.close(); Searcher searcher = new IndexSearcher(dir, true); diff --git a/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java index 8c49e2063bf..bfb75beefb3 100644 --- a/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java +++ b/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java @@ -19,14 +19,13 @@ package org.apache.lucene.misc; import java.io.IOException; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.FieldNormModifier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.IndexSearcher; @@ -61,7 +60,7 @@ public class TestLengthNormModifier extends LuceneTestCase { @Override protected void setUp() throws Exception { super.setUp(); - IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); diff --git a/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java b/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java index d84e24c71f6..da2c7dfd4ee 100644 --- a/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java +++ 
b/contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java @@ -24,7 +24,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -113,7 +113,7 @@ public class TestComplexPhraseQuery extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); RAMDirectory rd = new RAMDirectory(); - IndexWriter w = new IndexWriter(rd, analyzer, MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); for (int i = 0; i < docsContent.length; i++) { Document doc = new Document(); doc.add(new Field("name", docsContent[i].name, Field.Store.YES, diff --git a/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java index 479e6cfd7ce..7ba06db50ff 100644 --- a/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java +++ b/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java @@ -19,11 +19,11 @@ package org.apache.lucene.search; import java.io.IOException; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; @@ -36,7 +36,7 @@ public class BooleanFilterTest extends LuceneTestCase { protected void setUp() throws 
Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); //Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags addDoc(writer, "admin guest", "010", "20040101","Y"); diff --git a/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java index b0a4c961e4e..f3675a5aac0 100644 --- a/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java +++ b/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java @@ -25,6 +25,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; import org.apache.lucene.store.RAMDirectory; @@ -41,7 +42,7 @@ public class DuplicateFilterTest extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT))); //Add series of docs with filterable fields : url, text and dates flags addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101"); diff --git a/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java index 6b553d4f127..98ff6afc8c4 100644 --- 
a/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java +++ b/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java @@ -25,8 +25,8 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; @@ -39,7 +39,8 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, analyzer,true, MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer)); //Add series of docs with misspelt names addDoc(writer, "jonathon smythe","1"); diff --git a/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java b/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java index 58630d2e78a..c79200efba8 100644 --- a/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java +++ b/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java @@ -19,13 +19,12 @@ package org.apache.lucene.search; import java.util.HashSet; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util.OpenBitSet; @@ -54,9 +53,8 @@ public class TermsFilterTest extends LuceneTestCase { { String fieldName="field1"; RAMDirectory rd=new RAMDirectory(); - IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),MaxFieldLength.UNLIMITED); - for (int i = 0; i < 100; i++) - { + IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT)); + for (int i = 0; i < 100; i++) { Document doc=new Document(); int term=i*10; //terms are units of 10; doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED)); diff --git a/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java b/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java index 597df2488cc..e55670e53ee 100644 --- a/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java +++ b/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java @@ -28,7 +28,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; @@ -45,8 +45,7 @@ public class TestMoreLikeThis extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT), - true, MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT))); // Add series of docs with specific information for MoreLikeThis addDoc(writer, "lucene"); diff --git 
a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java index 537b2c4b63b..d1803abde77 100644 --- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java +++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java @@ -27,6 +27,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator; import org.apache.lucene.search.BooleanClause; @@ -319,8 +320,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { public void testStopWordSearching() throws Exception { Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); Directory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, analyzer, true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED)); diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java index 264a98b6501..88badbdda0e 100644 --- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java +++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java @@ -40,7 +40,11 @@ import 
org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; /** - * Tests multi field query parsing using the {@link MultiFieldQueryParserWrapper}. + * Tests multi field query parsing using the + * {@link MultiFieldQueryParserWrapper}. + * + * @deprecated this tests test the deprecated MultiFieldQueryParserWrapper, so + * when the latter is gone, so should this test. */ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase { diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java index 965c26affd6..bafc5b8c17a 100644 --- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java +++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java @@ -51,6 +51,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.messages.MessageImpl; import org.apache.lucene.queryParser.core.QueryNodeException; @@ -571,8 +572,7 @@ public class TestQPHelper extends LocalizedTestCase { public void testFarsiRangeCollating() throws Exception { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.NOT_ANALYZED)); @@ -994,8 +994,7 @@ public class TestQPHelper extends LocalizedTestCase { public void testLocalDateFormat() throws IOException, QueryNodeException { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = 
new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw); addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); iw.close(); @@ -1193,7 +1192,7 @@ public class TestQPHelper extends LocalizedTestCase { public void testMultiPhraseQuery() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new CannedAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new CannedAnalyzer())); Document doc = new Document(); doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); diff --git a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java index ee942a631cb..1eb5801cd71 100644 --- a/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java +++ b/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java @@ -78,6 +78,9 @@ import org.apache.lucene.util.LocalizedTestCase; * to use new {@link QueryParserWrapper} instead of the old query parser. * * Tests QueryParser. + * + * @deprecated this entire test case tests QueryParserWrapper which is + * deprecated. When QPW is gone, so will the test. 
*/ public class TestQueryParserWrapper extends LocalizedTestCase { diff --git a/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java b/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java index 2a09b0c5127..e0c943a117b 100644 --- a/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java +++ b/contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java @@ -19,8 +19,8 @@ package org.apache.lucene.search.regex; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.IndexSearcher; @@ -40,8 +40,7 @@ public class TestRegexQuery extends LuceneTestCase { super.setUp(); RAMDirectory directory = new RAMDirectory(); try { - IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc); diff --git a/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java b/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java index 27f689774ab..d786ec68d62 100644 --- a/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java +++ b/contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java @@ -19,13 +19,13 @@ package org.apache.lucene.search.regex; import java.io.IOException; -import org.apache.lucene.analysis.SimpleAnalyzer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import 
org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiSearcher; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -44,7 +44,7 @@ public class TestSpanRegexQuery extends LuceneTestCase { public void testSpanRegex() throws Exception { RAMDirectory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); // doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", // Field.Store.NO, Field.Index.ANALYZED)); @@ -109,15 +109,15 @@ public class TestSpanRegexQuery extends LuceneTestCase { Field.Index.ANALYZED_NO_NORMS)); // creating first index writer - IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT), - true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writerA = new IndexWriter(indexStoreA, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); writerA.addDocument(lDoc); writerA.optimize(); writerA.close(); // creating second index writer - IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), - true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writerB = new IndexWriter(indexStoreB, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); writerB.addDocument(lDoc2); writerB.optimize(); writerB.close(); diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java 
b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java index 796f9b67892..9d871c52b54 100644 --- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java +++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java @@ -27,6 +27,7 @@ import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.RAMDirectory; @@ -57,8 +58,9 @@ public class TestRemoteCachingWrapperFilter extends LuceneTestCase { private static void startServer() throws Exception { // construct an index RAMDirectory indexStore = new RAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new SimpleAnalyzer( + TEST_VERSION_CURRENT))); Document doc = new Document(); doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED)); doc.add(new Field("type", "A", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java index 950f13b5dbf..42fc738ccaf 100644 --- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java +++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java @@ -19,9 +19,9 @@ package org.apache.lucene.search; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.*; import org.apache.lucene.index.IndexWriter; +import 
org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.RAMDirectory; @@ -58,7 +58,7 @@ public class TestRemoteSearchable extends LuceneTestCase { private static void startServer() throws Exception { // construct an index RAMDirectory indexStore = new RAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(TEST_VERSION_CURRENT),true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(indexStore,new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED)); doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java index caf0060b707..d5dd67f4198 100644 --- a/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java +++ b/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java @@ -30,11 +30,12 @@ import junit.framework.Test; import junit.framework.TestSuite; import junit.textui.TestRunner; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; @@ -109,9 +110,9 @@ public class TestRemoteSort extends LuceneTestCase implements Serializable { private Searcher getIndex (boolean even, boolean odd) throws IOException { RAMDirectory indexStore = new RAMDirectory (); - IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, 
IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(1000); + IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(1000); for (int i=0; i @@ -139,8 +142,7 @@ public class SpellChecker implements java.io.Closeable { synchronized (modifyCurrentIndexLock) { ensureOpen(); if (!IndexReader.indexExists(spellIndexDir)) { - IndexWriter writer = new IndexWriter(spellIndexDir, null, true, - IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(spellIndexDir, new IndexWriterConfig(Version.LUCENE_CURRENT)); writer.close(); } swapSearcher(spellIndexDir); @@ -353,7 +355,8 @@ public class SpellChecker implements java.io.Closeable { synchronized (modifyCurrentIndexLock) { ensureOpen(); final Directory dir = this.spellIndex; - final IndexWriter writer = new IndexWriter(dir, null, true, IndexWriter.MaxFieldLength.UNLIMITED); + final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + Version.LUCENE_CURRENT).setOpenMode(OpenMode.CREATE)); writer.close(); swapSearcher(dir); } @@ -388,10 +391,8 @@ public class SpellChecker implements java.io.Closeable { synchronized (modifyCurrentIndexLock) { ensureOpen(); final Directory dir = this.spellIndex; - final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), - IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMergeFactor(mergeFactor); - writer.setRAMBufferSizeMB(ramMB); + final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT).setRAMBufferSizeMB(ramMB)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(mergeFactor); Iterator iter = dict.getWordsIterator(); while (iter.hasNext()) { diff --git a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java index 
cf9902d6f9e..3926a139bc2 100644 --- a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java +++ b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java @@ -20,11 +20,11 @@ package org.apache.lucene.search.spell; import java.io.IOException; import java.util.Iterator; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; @@ -46,7 +46,7 @@ public class TestLuceneDictionary extends LuceneTestCase { @Override protected void setUp() throws Exception { super.setUp(); - IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(LuceneTestCase.TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc; diff --git a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java index fd4d1ecd071..3c14c619c8c 100755 --- a/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java +++ b/contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java @@ -26,12 +26,12 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import 
org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -54,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase { //create a user index userindex = new RAMDirectory(); - IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < 1000; i++) { Document doc = new Document(); diff --git a/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java b/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java index c74d964988a..9cd234fb61a 100644 --- a/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java +++ b/contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java @@ -19,11 +19,11 @@ package org.apache.lucene.queryParser.surround.query; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.Version; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.analysis.WhitespaceAnalyzer; -import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; public class SingleFieldTestDb { private Directory db; @@ -35,9 +35,7 @@ public class SingleFieldTestDb { db = new RAMDirectory(); docs = documents; fieldName = fName; - Analyzer analyzer = new WhitespaceAnalyzer(); - IndexWriter writer = new IndexWriter(db, analyzer, true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(Version.LUCENE_CURRENT)); for (int j = 0; j < docs.length; j++) { Document d = new 
Document(); d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED)); diff --git a/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java b/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java index 65f97d15980..219d27da4ba 100644 --- a/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java +++ b/contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java @@ -31,6 +31,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.MultiFieldQueryParser; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; @@ -87,7 +88,7 @@ public class ListSearcher extends AbstractListModel { private ListDataListener listModelListener; public ListSearcher(ListModel newModel) { - analyzer = new WhitespaceAnalyzer(); + analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); setListModel(newModel); listModelListener = new ListModelHandler(); newModel.addListDataListener(listModelListener); @@ -117,7 +118,7 @@ public class ListSearcher extends AbstractListModel { try { // recreate the RAMDirectory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_CURRENT).setAnalyzer(analyzer)); // iterate through all rows for (int row=0; row < listModel.getSize(); row++){ diff --git a/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java b/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java index 1bcb510813a..ed92e727d3b 100644 --- a/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java +++ 
b/contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java @@ -29,6 +29,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.MultiFieldQueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -115,7 +116,7 @@ public class TableSearcher extends AbstractTableModel { * @param tableModel The table model to decorate */ public TableSearcher(TableModel tableModel) { - analyzer = new WhitespaceAnalyzer(); + analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT); tableModelListener = new TableModelHandler(); setTableModel(tableModel); tableModel.addTableModelListener(tableModelListener); @@ -163,7 +164,7 @@ public class TableSearcher extends AbstractTableModel { try { // recreate the RAMDirectory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_CURRENT).setAnalyzer(analyzer)); // iterate through all rows for (int row=0; row < tableModel.getRowCount(); row++){ diff --git a/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java b/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java index 9e9f048350b..0182a4cece0 100644 --- a/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java +++ b/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java @@ -35,6 +35,9 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogMergePolicy; +import 
org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.Version; @@ -245,8 +248,10 @@ public class Syns2Index try { // override the specific index if it already exists - IndexWriter writer = new IndexWriter(dir, ana, true, IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(true); // why? + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + Version.LUCENE_CURRENT).setOpenMode(OpenMode.CREATE).setAnalyzer(ana)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true); // why? + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(true); // why? Iterator i1 = word2Nums.keySet().iterator(); while (i1.hasNext()) // for each word { diff --git a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java index e282627bf16..ae166d81b59 100644 --- a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java +++ b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java @@ -12,12 +12,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.Version; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -63,7 +65,7 @@ public class TestParser extends TestCase { { BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); dir=new RAMDirectory(); - IndexWriter writer=new IndexWriter(dir,analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_24).setAnalyzer(analyzer)); String line = d.readLine(); while(line!=null) { diff --git a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java index b23b729f997..e5cb3b82eb5 100644 --- a/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java +++ b/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java @@ -11,6 +11,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.RAMDirectory; @@ -141,7 +142,7 @@ public class TestQueryTemplateManager extends LuceneTestCase { //Create an index RAMDirectory dir=new RAMDirectory(); - IndexWriter w=new IndexWriter(dir,analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w=new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); for (int i = 0; i < docFieldValues.length; i++) { w.addDocument(getDocumentFromString(docFieldValues[i])); diff --git a/src/demo/org/apache/lucene/demo/IndexFiles.java b/src/demo/org/apache/lucene/demo/IndexFiles.java index def29566986..a3fbfa349dd 100644 --- a/src/demo/org/apache/lucene/demo/IndexFiles.java +++ 
b/src/demo/org/apache/lucene/demo/IndexFiles.java @@ -19,6 +19,8 @@ package org.apache.lucene.demo; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.Version; @@ -55,7 +57,10 @@ public class IndexFiles { Date start = new Date(); try { - IndexWriter writer = new IndexWriter(FSDirectory.open(INDEX_DIR), new StandardAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(FSDirectory.open(INDEX_DIR), + new IndexWriterConfig(Version.LUCENE_CURRENT).setOpenMode( + OpenMode.CREATE).setAnalyzer( + new StandardAnalyzer(Version.LUCENE_CURRENT))); System.out.println("Indexing to directory '" +INDEX_DIR+ "'..."); indexDocs(writer, docDir); System.out.println("Optimizing..."); diff --git a/src/demo/org/apache/lucene/demo/IndexHTML.java b/src/demo/org/apache/lucene/demo/IndexHTML.java index 89ee87cd74d..1b824a186cd 100644 --- a/src/demo/org/apache/lucene/demo/IndexHTML.java +++ b/src/demo/org/apache/lucene/demo/IndexHTML.java @@ -21,8 +21,10 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.Version; @@ -77,8 +79,9 @@ public class IndexHTML { deleting = true; indexDocs(root, index, create); } - writer = new IndexWriter(FSDirectory.open(index), new StandardAnalyzer(Version.LUCENE_CURRENT), create, - new IndexWriter.MaxFieldLength(1000000)); + writer = new 
IndexWriter(FSDirectory.open(index), new IndexWriterConfig( + Version.LUCENE_CURRENT).setAnalyzer(new StandardAnalyzer( + Version.LUCENE_CURRENT)).setMaxFieldLength(1000000).setOpenMode(create ? OpenMode.CREATE : OpenMode.CREATE_OR_APPEND)); indexDocs(root, index, create); // add new docs System.out.println("Optimizing index..."); diff --git a/src/java/org/apache/lucene/index/DirectoryReader.java b/src/java/org/apache/lucene/index/DirectoryReader.java index 657a8fc14b1..e5a0a5e7593 100644 --- a/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/src/java/org/apache/lucene/index/DirectoryReader.java @@ -742,7 +742,7 @@ class DirectoryReader extends IndexReader implements Cloneable { if (writeLock == null) { Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME); - if (!writeLock.obtain(IndexWriter.WRITE_LOCK_TIMEOUT)) // obtain write lock + if (!writeLock.obtain(IndexWriterConfig.WRITE_LOCK_TIMEOUT)) // obtain write lock throw new LockObtainFailedException("Index locked for write: " + writeLock); this.writeLock = writeLock; diff --git a/src/java/org/apache/lucene/index/DocumentsWriter.java b/src/java/org/apache/lucene/index/DocumentsWriter.java index 3c1a6ca28a8..959a0b17c75 100644 --- a/src/java/org/apache/lucene/index/DocumentsWriter.java +++ b/src/java/org/apache/lucene/index/DocumentsWriter.java @@ -138,7 +138,7 @@ final class DocumentsWriter { private DocFieldProcessor docFieldProcessor; PrintStream infoStream; - int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH; + int maxFieldLength = IndexWriterConfig.UNLIMITED_FIELD_LENGTH; Similarity similarity; List newFiles; @@ -223,7 +223,7 @@ final class DocumentsWriter { abstract DocConsumer getChain(DocumentsWriter documentsWriter); } - static final IndexingChain DefaultIndexingChain = new IndexingChain() { + static final IndexingChain defaultIndexingChain = new IndexingChain() { @Override DocConsumer getChain(DocumentsWriter documentsWriter) { @@ -270,22 +270,22 @@ final class 
DocumentsWriter { // The max number of delete terms that can be buffered before // they must be flushed to disk. - private int maxBufferedDeleteTerms = IndexWriter.DEFAULT_MAX_BUFFERED_DELETE_TERMS; + private int maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS; // How much RAM we can use before flushing. This is 0 if // we are flushing by doc count instead. - private long ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024); + private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024); private long waitQueuePauseBytes = (long) (ramBufferSize*0.1); private long waitQueueResumeBytes = (long) (ramBufferSize*0.05); // If we've allocated 5% over our RAM budget, we then // free down to 95% - private long freeTrigger = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*1.05); - private long freeLevel = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95); + private long freeTrigger = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*1.05); + private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95); // Flush @ this number of docs. If ramBufferSize is // non-zero we will flush by RAM usage instead. - private int maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS; + private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS; private int flushedDocCount; // How many docs already flushed to index @@ -304,7 +304,7 @@ final class DocumentsWriter { DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) throws IOException { this.directory = directory; this.writer = writer; - this.similarity = writer.getSimilarity(); + this.similarity = writer.getConfig().getSimilarity(); flushedDocCount = writer.maxDoc(); consumer = indexingChain.getChain(this); @@ -342,8 +342,8 @@ final class DocumentsWriter { /** Set how much RAM we can use before flushing. 
*/ synchronized void setRAMBufferSizeMB(double mb) { - if (mb == IndexWriter.DISABLE_AUTO_FLUSH) { - ramBufferSize = IndexWriter.DISABLE_AUTO_FLUSH; + if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) { + ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH; waitQueuePauseBytes = 4*1024*1024; waitQueueResumeBytes = 2*1024*1024; } else { @@ -356,7 +356,7 @@ final class DocumentsWriter { } synchronized double getRAMBufferSizeMB() { - if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH) { + if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) { return ramBufferSize; } else { return ramBufferSize/1024./1024.; @@ -587,7 +587,7 @@ final class DocumentsWriter { synchronized private void initFlushState(boolean onlyDocStore) { initSegmentName(onlyDocStore); - flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getTermIndexInterval()); + flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getConfig().getTermIndexInterval()); } /** Flush all pending docs to a new segment */ @@ -766,7 +766,7 @@ final class DocumentsWriter { // always get N docs when we flush by doc count, even if // > 1 thread is adding documents: if (!flushPending && - maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH + maxBufferedDocs != IndexWriterConfig.DISABLE_AUTO_FLUSH && numDocsInRAM >= maxBufferedDocs) { flushPending = true; state.doFlushAfter = true; @@ -928,9 +928,9 @@ final class DocumentsWriter { } synchronized boolean deletesFull() { - return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && + return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize) || - (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && + (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH && ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms)); } @@ -943,9 +943,9 
@@ final class DocumentsWriter { // too-frequent flushing of a long tail of tiny segments // when merges (which always apply deletes) are // infrequent. - return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && + return (ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) || - (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && + (maxBufferedDeleteTerms != IndexWriterConfig.DISABLE_AUTO_FLUSH && ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms)); } @@ -1115,7 +1115,7 @@ final class DocumentsWriter { } synchronized boolean doBalanceRAM() { - return ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && !bufferIsFull && (numBytesUsed+deletesInRAM.bytesUsed+deletesFlushed.bytesUsed >= ramBufferSize || numBytesAlloc >= freeTrigger); + return ramBufferSize != IndexWriterConfig.DISABLE_AUTO_FLUSH && !bufferIsFull && (numBytesUsed+deletesInRAM.bytesUsed+deletesFlushed.bytesUsed >= ramBufferSize || numBytesAlloc >= freeTrigger); } /** Does the synchronized work to finish/flush the diff --git a/src/java/org/apache/lucene/index/IndexWriter.java b/src/java/org/apache/lucene/index/IndexWriter.java index 28238dd7fcb..fa1d6b08cbe 100644 --- a/src/java/org/apache/lucene/index/IndexWriter.java +++ b/src/java/org/apache/lucene/index/IndexWriter.java @@ -19,7 +19,7 @@ package org.apache.lucene.index; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.DocumentsWriter.IndexingChain; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -29,6 +29,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.util.Constants; import org.apache.lucene.util.ThreadInterruptedException; +import 
org.apache.lucene.util.Version; import java.io.IOException; import java.io.Closeable; @@ -179,10 +180,11 @@ public class IndexWriter implements Closeable { /** * Default value for the write lock timeout (1,000). * @see #setDefaultWriteLockTimeout + * @deprecated use {@link IndexWriterConfig#WRITE_LOCK_TIMEOUT} instead */ - public static long WRITE_LOCK_TIMEOUT = 1000; + public static long WRITE_LOCK_TIMEOUT = IndexWriterConfig.WRITE_LOCK_TIMEOUT; - private long writeLockTimeout = WRITE_LOCK_TIMEOUT; + private long writeLockTimeout; /** * Name of the write lock in the index. @@ -191,36 +193,43 @@ public class IndexWriter implements Closeable { /** * Value to denote a flush trigger is disabled + * @deprecated use {@link IndexWriterConfig#DISABLE_AUTO_FLUSH} instead */ - public final static int DISABLE_AUTO_FLUSH = -1; + public final static int DISABLE_AUTO_FLUSH = IndexWriterConfig.DISABLE_AUTO_FLUSH; /** * Disabled by default (because IndexWriter flushes by RAM usage * by default). Change using {@link #setMaxBufferedDocs(int)}. + * @deprecated use {@link IndexWriterConfig#DEFAULT_MAX_BUFFERED_DOCS} instead. */ - public final static int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH; + public final static int DEFAULT_MAX_BUFFERED_DOCS = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS; /** * Default value is 16 MB (which means flush when buffered * docs consume 16 MB RAM). Change using {@link #setRAMBufferSizeMB}. + * @deprecated use {@link IndexWriterConfig#DEFAULT_RAM_BUFFER_SIZE_MB} instead. */ - public final static double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0; + public final static double DEFAULT_RAM_BUFFER_SIZE_MB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB; /** * Disabled by default (because IndexWriter flushes by RAM usage * by default). Change using {@link #setMaxBufferedDeleteTerms(int)}. 
+ * @deprecated use {@link IndexWriterConfig#DEFAULT_MAX_BUFFERED_DELETE_TERMS} instead */ - public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH; + public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS; /** * Default value is 10,000. Change using {@link #setMaxFieldLength(int)}. + * + * @deprecated see {@link IndexWriterConfig} */ public final static int DEFAULT_MAX_FIELD_LENGTH = 10000; /** * Default value is 128. Change using {@link #setTermIndexInterval(int)}. + * @deprecated use {@link IndexWriterConfig#DEFAULT_TERM_INDEX_INTERVAL} instead. */ - public final static int DEFAULT_TERM_INDEX_INTERVAL = 128; + public final static int DEFAULT_TERM_INDEX_INTERVAL = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; /** * Absolute hard maximum length for a term. If a term @@ -244,10 +253,11 @@ public class IndexWriter implements Closeable { private int messageID = -1; volatile private boolean hitOOM; - private Directory directory; // where this index resides - private Analyzer analyzer; // how to analyze text + private final Directory directory; // where this index resides + private final Analyzer analyzer; // how to analyze text - private Similarity similarity = Similarity.getDefault(); // how to normalize + // TODO 4.0: this should be made final once the setter is out + private /*final*/Similarity similarity = Similarity.getDefault(); // how to normalize private volatile long changeCount; // increments every time a change is completed private long lastCommitChangeCount; // last changeCount that was committed @@ -270,7 +280,8 @@ public class IndexWriter implements Closeable { private Lock writeLock; - private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL; + // TODO 4.0: this should be made final once the setter is out + private /*final*/int termIndexInterval; private boolean closed; private boolean closing; @@ -280,7 +291,8 @@ public class IndexWriter implements Closeable { private 
HashSet mergingSegments = new HashSet(); private MergePolicy mergePolicy = new LogByteSizeMergePolicy(this); - private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler(); + // TODO 4.0: this should be made final once the setter is removed + private /*final*/MergeScheduler mergeScheduler; private LinkedList pendingMerges = new LinkedList(); private Set runningMerges = new HashSet(); private List mergeExceptions = new ArrayList(); @@ -307,7 +319,11 @@ public class IndexWriter implements Closeable { // deletes, doing merges, and reopening near real-time // readers. private volatile boolean poolReaders; - + + // The instance that was passed to the constructor. It is saved only in order + // to allow users to query an IndexWriter settings. + private final IndexWriterConfig config; + /** * Expert: returns a readonly reader, covering all * committed as well as un-committed changes to the index. @@ -777,19 +793,29 @@ public class IndexWriter implements Closeable { * Otherwise an IllegalArgumentException is thrown.

* * @see #setUseCompoundFile(boolean) + * @deprecated use {@link LogMergePolicy#getUseCompoundDocStore()} and + * {@link LogMergePolicy#getUseCompoundFile()} directly. */ public boolean getUseCompoundFile() { return getLogMergePolicy().getUseCompoundFile(); } - /**

Setting to turn on usage of a compound file. When on, - * multiple files for each segment are merged into a - * single file when a new segment is flushed.

- * - *

Note that this method is a convenience method: it - * just calls mergePolicy.setUseCompoundFile as long as - * mergePolicy is an instance of {@link LogMergePolicy}. - * Otherwise an IllegalArgumentException is thrown.

+ /** + *

+ * Setting to turn on usage of a compound file. When on, multiple files for + * each segment are merged into a single file when a new segment is flushed. + *

+ * + *

+ * Note that this method is a convenience method: it just calls + * mergePolicy.setUseCompoundFile as long as mergePolicy is an instance of + * {@link LogMergePolicy}. Otherwise an IllegalArgumentException is thrown. + *

+ * + * @deprecated use {@link LogMergePolicy#setUseCompoundDocStore(boolean)} and + * {@link LogMergePolicy#setUseCompoundFile(boolean)} directly. + * Note that this method set the given value on both, therefore + * you should consider doing the same. */ public void setUseCompoundFile(boolean value) { getLogMergePolicy().setUseCompoundFile(value); @@ -799,20 +825,25 @@ public class IndexWriter implements Closeable { /** Expert: Set the Similarity implementation used by this IndexWriter. * * @see Similarity#setDefault(Similarity) + * @deprecated use {@link IndexWriterConfig#setSimilarity(Similarity)} instead */ public void setSimilarity(Similarity similarity) { ensureOpen(); this.similarity = similarity; docWriter.setSimilarity(similarity); + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setSimilarity(similarity); } /** Expert: Return the Similarity implementation used by this IndexWriter. * *

This defaults to the current value of {@link Similarity#getDefault()}. + * @deprecated use {@link IndexWriterConfig#getSimilarity()} instead */ public Similarity getSimilarity() { ensureOpen(); - return this.similarity; + return similarity; } /** Expert: Set the interval between indexed terms. Large values cause less @@ -835,15 +866,20 @@ public class IndexWriter implements Closeable { * must be scanned for each random term access. * * @see #DEFAULT_TERM_INDEX_INTERVAL + * @deprecated use {@link IndexWriterConfig#setTermIndexInterval(int)} */ public void setTermIndexInterval(int interval) { ensureOpen(); this.termIndexInterval = interval; + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setTermIndexInterval(interval); } /** Expert: Return the interval between indexed terms. * * @see #setTermIndexInterval(int) + * @deprecated use {@link IndexWriterConfig#getTermIndexInterval()} */ public int getTermIndexInterval() { // We pass false because this method is called by SegmentMerger while we are in the process of closing @@ -872,10 +908,13 @@ public class IndexWriter implements Closeable { * if it does not exist and create is * false or if there is any other low-level * IO error + * @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead */ public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, null, mfl.getLimit(), null, null); + this(d, new IndexWriterConfig(Version.LUCENE_31).setAnalyzer(a).setOpenMode( + create ? 
OpenMode.CREATE : OpenMode.APPEND).setMaxFieldLength( + mfl.getLimit())); } /** @@ -895,10 +934,12 @@ public class IndexWriter implements Closeable { * @throws IOException if the directory cannot be * read/written to or if there is any other low-level * IO error + * @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead */ public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, null, mfl.getLimit(), null, null); + this(d, new IndexWriterConfig(Version.LUCENE_31).setAnalyzer(a) + .setMaxFieldLength(mfl.getLimit())); } /** @@ -918,10 +959,13 @@ public class IndexWriter implements Closeable { * @throws IOException if the directory cannot be * read/written to or if there is any other low-level * IO error + * @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead */ public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, deletionPolicy, mfl.getLimit(), null, null); + this(d, new IndexWriterConfig(Version.LUCENE_31).setAnalyzer(a) + .setMaxFieldLength(mfl.getLimit()).setIndexDeletionPolicy( + deletionPolicy)); } /** @@ -947,43 +991,13 @@ public class IndexWriter implements Closeable { * if it does not exist and create is * false or if there is any other low-level * IO error + * @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead */ public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, deletionPolicy, mfl.getLimit(), null, null); - } - - /** - * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy} and {@link IndexingChain}, - * for the index in d. - * Text will be analyzed with a. 
If - * create is true, then a new, empty index - * will be created in d, replacing the index - * already there, if any. - * - * @param d the index directory - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @param deletionPolicy see above - * @param mfl whether or not to limit field lengths, value is in number of terms/tokens. See {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}. - * @param indexingChain the {@link DocConsumer} chain to be used to - * process documents - * @param commit which commit to open - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - */ - IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, deletionPolicy, mfl.getLimit(), indexingChain, commit); + this(d, new IndexWriterConfig(Version.LUCENE_31).setAnalyzer(a).setOpenMode( + create ? 
OpenMode.CREATE : OpenMode.APPEND).setMaxFieldLength( + mfl.getLimit()).setIndexDeletionPolicy(deletionPolicy)); } /** @@ -1017,44 +1031,74 @@ public class IndexWriter implements Closeable { * if it does not exist and create is * false or if there is any other low-level * IO error + * @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead */ public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, deletionPolicy, mfl.getLimit(), null, commit); + this(d, new IndexWriterConfig(Version.LUCENE_31).setAnalyzer(a) + .setOpenMode(OpenMode.APPEND).setMaxFieldLength(mfl.getLimit()) + .setIndexDeletionPolicy(deletionPolicy).setIndexCommit(commit)); } - private void init(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, - int maxFieldLength, IndexingChain indexingChain, IndexCommit commit) - throws CorruptIndexException, LockObtainFailedException, IOException { - if (IndexReader.indexExists(d)) { - init(d, a, false, deletionPolicy, maxFieldLength, indexingChain, commit); - } else { - init(d, a, true, deletionPolicy, maxFieldLength, indexingChain, commit); - } - } - - private void init(Directory d, Analyzer a, final boolean create, - IndexDeletionPolicy deletionPolicy, int maxFieldLength, - IndexingChain indexingChain, IndexCommit commit) - throws CorruptIndexException, LockObtainFailedException, IOException { - + /** + * Constructs a new IndexWriter per the settings given in conf. + * Note that the passed in {@link IndexWriterConfig} is cloned and thus making + * changes to it after IndexWriter has been instantiated will not affect + * IndexWriter. Additionally, calling {@link #getConfig()} and changing the + * parameters does not affect that IndexWriter instance. + *

+ * NOTE: by default, {@link IndexWriterConfig#getMaxFieldLength()} + * returns {@link IndexWriterConfig#UNLIMITED_FIELD_LENGTH}. Pay attention to + * whether this setting fits your application. + * + * @param d + * the index directory. The index is either created or appended + * according to conf.getOpenMode(). + * @param conf + * the configuration settings according to which IndexWriter should + * be initialized. + * @throws CorruptIndexException + * if the index is corrupt + * @throws LockObtainFailedException + * if another writer has this index open (write.lock + * could not be obtained) + * @throws IOException + * if the directory cannot be read/written to, or if it does not + * exist and conf.getOpenMode() is + * OpenMode.APPEND or if there is any other low-level + * IO error + */ + public IndexWriter(Directory d, IndexWriterConfig conf) + throws CorruptIndexException, LockObtainFailedException, IOException { + config = (IndexWriterConfig) conf.clone(); directory = d; - analyzer = a; + analyzer = conf.getAnalyzer(); setMessageID(defaultInfoStream); - this.maxFieldLength = maxFieldLength; + maxFieldLength = conf.getMaxFieldLength(); + termIndexInterval = conf.getTermIndexInterval(); + writeLockTimeout = conf.getWriteLockTimeout(); + similarity = conf.getSimilarity(); + mergeScheduler = conf.getMergeScheduler(); - if (indexingChain == null) - indexingChain = DocumentsWriter.DefaultIndexingChain; + OpenMode mode = conf.getOpenMode(); + boolean create; + if (mode == OpenMode.CREATE) { + create = true; + } else if (mode == OpenMode.APPEND) { + create = false; + } else { + // CREATE_OR_APPEND - create only if an index does not exist + create = !IndexReader.indexExists(directory); + } if (create) { // Clear the write lock in case it's leftover: directory.clearLock(WRITE_LOCK_NAME); } - Lock writeLock = directory.makeLock(WRITE_LOCK_NAME); + writeLock = directory.makeLock(WRITE_LOCK_NAME); if (!writeLock.obtain(writeLockTimeout)) // obtain write lock throw new
LockObtainFailedException("Index locked for write: " + writeLock); - this.writeLock = writeLock; // save it try { if (create) { @@ -1085,6 +1129,7 @@ public class IndexWriter implements Closeable { } else { segmentInfos.read(directory); + IndexCommit commit = conf.getIndexCommit(); if (commit != null) { // Swap out all segments, but, keep metadata in // SegmentInfos, like version & generation, to @@ -1108,14 +1153,14 @@ public class IndexWriter implements Closeable { setRollbackSegmentInfos(segmentInfos); - docWriter = new DocumentsWriter(directory, this, indexingChain); + docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain()); docWriter.setInfoStream(infoStream); docWriter.setMaxFieldLength(maxFieldLength); // Default deleter (for backwards compatibility) is // KeepOnlyLastCommitDeleter: deleter = new IndexFileDeleter(directory, - deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy, + conf.getIndexDeletionPolicy(), segmentInfos, infoStream, docWriter); if (deleter.startingCommitDeleted) @@ -1125,20 +1170,22 @@ public class IndexWriter implements Closeable { // segments_N file. 
changeCount++; + docWriter.setMaxBufferedDeleteTerms(conf.getMaxBufferedDeleteTerms()); + docWriter.setRAMBufferSizeMB(conf.getRAMBufferSizeMB()); + docWriter.setMaxBufferedDocs(conf.getMaxBufferedDocs()); pushMaxBufferedDocs(); if (infoStream != null) { - message("init: create=" + create); messageState(); } } catch (IOException e) { - this.writeLock.release(); - this.writeLock = null; + writeLock.release(); + writeLock = null; throw e; } } - + private synchronized void setRollbackSegmentInfos(SegmentInfos infos) { rollbackSegmentInfos = (SegmentInfos) infos.clone(); assert !rollbackSegmentInfos.hasExternalSegments(directory); @@ -1148,6 +1195,19 @@ public class IndexWriter implements Closeable { rollbackSegments.put(rollbackSegmentInfos.info(i), Integer.valueOf(i)); } + /** + * Returns the {@link IndexWriterConfig} that was passed to + * {@link #IndexWriter(Directory, IndexWriterConfig)}. This allows querying + * IndexWriter's settings. + *

+ * NOTE: setting any parameter on the returned instance has no effect + * on the IndexWriter instance. If you need to change those settings after + * IndexWriter has been created, you need to instantiate a new IndexWriter. + */ + public IndexWriterConfig getConfig() { + return config; + } + /** * Expert: set the merge policy used by this writer. */ @@ -1175,6 +1235,7 @@ public class IndexWriter implements Closeable { /** * Expert: set the merge scheduler used by this writer. + * @deprecated use {@link IndexWriterConfig#setMergeScheduler(MergeScheduler)} instead */ synchronized public void setMergeScheduler(MergeScheduler mergeScheduler) throws CorruptIndexException, IOException { ensureOpen(); @@ -1188,12 +1249,16 @@ public class IndexWriter implements Closeable { this.mergeScheduler = mergeScheduler; if (infoStream != null) message("setMergeScheduler " + mergeScheduler); + // Required so config.getMergeScheduler returns the right value. But this will + // go away together with the method in 4.0. + config.setMergeScheduler(mergeScheduler); } /** - * Expert: returns the current MergePolicy in use by this + * Expert: returns the current MergeScheduler in use by this * writer. - * @see #setMergePolicy + * @see #setMergeScheduler(MergeScheduler) + * @deprecated use {@link IndexWriterConfig#getMergeScheduler()} instead */ public MergeScheduler getMergeScheduler() { ensureOpen(); @@ -1219,6 +1284,7 @@ public class IndexWriter implements Closeable { * LogByteSizeMergePolicy}) also allows you to set this * limit by net size (in MB) of the segment, using {@link * LogByteSizeMergePolicy#setMaxMergeMB}.

+ * @deprecated use {@link LogMergePolicy#setMaxMergeDocs(int)} directly. */ public void setMaxMergeDocs(int maxMergeDocs) { getLogMergePolicy().setMaxMergeDocs(maxMergeDocs); @@ -1234,6 +1300,7 @@ public class IndexWriter implements Closeable { * Otherwise an IllegalArgumentException is thrown.

* * @see #setMaxMergeDocs + * @deprecated use {@link LogMergePolicy#getMaxMergeDocs()} directly. */ public int getMaxMergeDocs() { return getLogMergePolicy().getMaxMergeDocs(); @@ -1252,6 +1319,7 @@ public class IndexWriter implements Closeable { * is your memory, but you should anticipate an OutOfMemoryError.

* By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms * will be indexed for a field. + * @deprecated use {@link IndexWriterConfig#setMaxFieldLength(int)} instead */ public void setMaxFieldLength(int maxFieldLength) { ensureOpen(); @@ -1259,12 +1327,16 @@ public class IndexWriter implements Closeable { docWriter.setMaxFieldLength(maxFieldLength); if (infoStream != null) message("setMaxFieldLength " + maxFieldLength); + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setMaxFieldLength(maxFieldLength); } /** * Returns the maximum number of terms that will be * indexed for a single field in a document. * @see #setMaxFieldLength + * @deprecated use {@link IndexWriterConfig#getMaxFieldLength()} instead */ public int getMaxFieldLength() { ensureOpen(); @@ -1289,6 +1361,7 @@ public class IndexWriter implements Closeable { * enabled but smaller than 2, or it disables maxBufferedDocs * when ramBufferSize is already disabled * @see #setRAMBufferSizeMB + * @deprecated use {@link IndexWriterConfig#setMaxBufferedDocs(int)} instead. */ public void setMaxBufferedDocs(int maxBufferedDocs) { ensureOpen(); @@ -1303,6 +1376,9 @@ public class IndexWriter implements Closeable { pushMaxBufferedDocs(); if (infoStream != null) message("setMaxBufferedDocs " + maxBufferedDocs); + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setMaxBufferedDocs(maxBufferedDocs); } /** @@ -1329,6 +1405,7 @@ public class IndexWriter implements Closeable { * Returns the number of buffered added documents that will * trigger a flush if enabled. * @see #setMaxBufferedDocs + * @deprecated use {@link IndexWriterConfig#getMaxBufferedDocs()} instead. 
*/ public int getMaxBufferedDocs() { ensureOpen(); @@ -1372,6 +1449,7 @@ public class IndexWriter implements Closeable { * @throws IllegalArgumentException if ramBufferSize is * enabled but non-positive, or it disables ramBufferSize * when maxBufferedDocs is already disabled + * @deprecated use {@link IndexWriterConfig#setRAMBufferSizeMB(double)} instead. */ public void setRAMBufferSizeMB(double mb) { if (mb > 2048.0) { @@ -1386,10 +1464,14 @@ public class IndexWriter implements Closeable { docWriter.setRAMBufferSizeMB(mb); if (infoStream != null) message("setRAMBufferSizeMB " + mb); + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setRAMBufferSizeMB(mb); } /** * Returns the value set by {@link #setRAMBufferSizeMB} if enabled. + * @deprecated use {@link IndexWriterConfig#getRAMBufferSizeMB()} instead. */ public double getRAMBufferSizeMB() { return docWriter.getRAMBufferSizeMB(); @@ -1406,6 +1488,7 @@ public class IndexWriter implements Closeable { * @throws IllegalArgumentException if maxBufferedDeleteTerms * is enabled but smaller than 1 * @see #setRAMBufferSizeMB + * @deprecated use {@link IndexWriterConfig#setMaxBufferedDeleteTerms(int)} instead. */ public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) { ensureOpen(); @@ -1416,12 +1499,16 @@ public class IndexWriter implements Closeable { docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms); if (infoStream != null) message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms); + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms); } /** * Returns the number of buffered deleted terms that will * trigger a flush if enabled. 
* @see #setMaxBufferedDeleteTerms + * @deprecated use {@link IndexWriterConfig#getMaxBufferedDeleteTerms()} instead */ public int getMaxBufferedDeleteTerms() { ensureOpen(); @@ -1442,6 +1529,7 @@ public class IndexWriter implements Closeable { * Otherwise an IllegalArgumentException is thrown.

* *

This must never be less than 2. The default value is 10. + * @deprecated use {@link LogMergePolicy#setMergeFactor(int)} directly. */ public void setMergeFactor(int mergeFactor) { getLogMergePolicy().setMergeFactor(mergeFactor); @@ -1458,6 +1546,7 @@ public class IndexWriter implements Closeable { * Otherwise an IllegalArgumentException is thrown.

* * @see #setMergeFactor + * @deprecated use {@link LogMergePolicy#getMergeFactor()} directly. */ public int getMergeFactor() { return getLogMergePolicy().getMergeFactor(); @@ -1494,15 +1583,11 @@ public class IndexWriter implements Closeable { } private void messageState() { - message("setInfoStream: dir=" + directory + - " mergePolicy=" + mergePolicy + - " mergeScheduler=" + mergeScheduler + - " ramBufferSizeMB=" + docWriter.getRAMBufferSizeMB() + - " maxBufferedDocs=" + docWriter.getMaxBufferedDocs() + - " maxBuffereDeleteTerms=" + docWriter.getMaxBufferedDeleteTerms() + - " maxFieldLength=" + maxFieldLength + - " index=" + segString() + - " version=" + Constants.LUCENE_VERSION); + message("\ndir=" + directory + "\n" + + "mergePolicy=" + mergePolicy + "\n" + + "index=" + segString() + "\n" + + "version=" + Constants.LUCENE_VERSION + "\n" + + config.toString()); } /** @@ -1522,15 +1607,20 @@ public class IndexWriter implements Closeable { /** * Sets the maximum time to wait for a write lock (in milliseconds) for this instance of IndexWriter. @see * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter. + * @deprecated use {@link IndexWriterConfig#setWriteLockTimeout(long)} instead */ public void setWriteLockTimeout(long writeLockTimeout) { ensureOpen(); this.writeLockTimeout = writeLockTimeout; + // Required so config.getSimilarity returns the right value. But this will + // go away together with the method in 4.0. + config.setWriteLockTimeout(writeLockTimeout); } /** * Returns allowed timeout when acquiring the write lock. * @see #setWriteLockTimeout + * @deprecated use {@link IndexWriterConfig#getWriteLockTimeout()} */ public long getWriteLockTimeout() { ensureOpen(); @@ -1540,18 +1630,20 @@ public class IndexWriter implements Closeable { /** * Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in * milliseconds). 
+ * @deprecated use {@link IndexWriterConfig#setDefaultWriteLockTimeout(long)} instead */ public static void setDefaultWriteLockTimeout(long writeLockTimeout) { - IndexWriter.WRITE_LOCK_TIMEOUT = writeLockTimeout; + IndexWriterConfig.setDefaultWriteLockTimeout(writeLockTimeout); } /** * Returns default write lock timeout for newly * instantiated IndexWriters. * @see #setDefaultWriteLockTimeout + * @deprecated use {@link IndexWriterConfig#getDefaultWriteLockTimeout()} instead */ public static long getDefaultWriteLockTimeout() { - return IndexWriter.WRITE_LOCK_TIMEOUT; + return IndexWriterConfig.getDefaultWriteLockTimeout(); } /** @@ -4785,9 +4877,13 @@ public class IndexWriter implements Closeable { } /** - * Specifies maximum field length (in number of tokens/terms) in {@link IndexWriter} constructors. - * {@link #setMaxFieldLength(int)} overrides the value set by - * the constructor. + * Specifies maximum field length (in number of tokens/terms) in + * {@link IndexWriter} constructors. {@link #setMaxFieldLength(int)} overrides + * the value set by the constructor. + * + * @deprecated use {@link IndexWriterConfig} and pass + * {@link IndexWriterConfig#UNLIMITED_FIELD_LENGTH} or your own + * value. 
*/ public static final class MaxFieldLength { diff --git a/src/java/org/apache/lucene/index/SegmentMerger.java b/src/java/org/apache/lucene/index/SegmentMerger.java index b58c92af014..8cbb6e52646 100644 --- a/src/java/org/apache/lucene/index/SegmentMerger.java +++ b/src/java/org/apache/lucene/index/SegmentMerger.java @@ -48,7 +48,7 @@ final class SegmentMerger { private Directory directory; private String segment; - private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL; + private int termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; private List readers = new ArrayList(); private FieldInfos fieldInfos; @@ -96,7 +96,7 @@ final class SegmentMerger { } }; } - termIndexInterval = writer.getTermIndexInterval(); + termIndexInterval = writer.getConfig().getTermIndexInterval(); } boolean hasProx() { diff --git a/src/test/org/apache/lucene/TestDemo.java b/src/test/org/apache/lucene/TestDemo.java index 0e0becefcbb..7b71a980ba3 100644 --- a/src/test/org/apache/lucene/TestDemo.java +++ b/src/test/org/apache/lucene/TestDemo.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; @@ -49,8 +50,9 @@ public class TestDemo extends LuceneTestCase { Directory directory = new RAMDirectory(); // To store an index on disk, use this instead: //Directory directory = FSDirectory.open("/tmp/testindex"); - IndexWriter iwriter = new IndexWriter(directory, analyzer, true, - new IndexWriter.MaxFieldLength(25000)); + IndexWriter iwriter = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxFieldLength(25000)); + Document doc = new Document(); String text = "This 
is the text to be indexed."; doc.add(new Field("fieldname", text, Field.Store.YES, diff --git a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java index d2cf0b8a6f9..79b82d5b35c 100644 --- a/src/test/org/apache/lucene/TestMergeSchedulerExternal.java +++ b/src/test/org/apache/lucene/TestMergeSchedulerExternal.java @@ -18,9 +18,9 @@ package org.apache.lucene; */ import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.document.Document; @@ -86,15 +86,14 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - MyMergeScheduler ms = new MyMergeScheduler(); - writer.setMergeScheduler(ms); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMergeScheduler(new MyMergeScheduler()) + .setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH)); for(int i=0;i<20;i++) writer.addDocument(doc); - ms.sync(); + ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); writer.close(); assertTrue(mergeThreadCreated); diff --git a/src/test/org/apache/lucene/TestSearch.java b/src/test/org/apache/lucene/TestSearch.java index ca85c3fcd02..0c4e08697bf 100644 --- a/src/test/org/apache/lucene/TestSearch.java +++ b/src/test/org/apache/lucene/TestSearch.java @@ -70,14 +70,14 @@ 
public class TestSearch extends LuceneTestCase { private void doTestSearch(PrintWriter out, boolean useCompoundFile) - throws Exception - { + throws Exception { Directory directory = new RAMDirectory(); Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(directory, analyzer, true, - IndexWriter.MaxFieldLength.LIMITED); - - writer.setUseCompoundFile(useCompoundFile); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); String[] docs = { "a b c d e", diff --git a/src/test/org/apache/lucene/TestSearchForDuplicates.java b/src/test/org/apache/lucene/TestSearchForDuplicates.java index 5f0a13dff1c..e16e02b4473 100644 --- a/src/test/org/apache/lucene/TestSearchForDuplicates.java +++ b/src/test/org/apache/lucene/TestSearchForDuplicates.java @@ -78,10 +78,11 @@ public class TestSearchForDuplicates extends LuceneTestCase { private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception { Directory directory = new RAMDirectory(); Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(directory, analyzer, true, - IndexWriter.MaxFieldLength.LIMITED); - - writer.setUseCompoundFile(useCompoundFiles); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFiles); + lmp.setUseCompoundDocStore(useCompoundFiles); final int MAX_DOCS = 225; diff --git a/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java b/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java index 6ce929274f4..3ea3536ffef 100644 --- a/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java +++ 
b/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.TestIndexWriter; @@ -67,9 +68,10 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase Directory dir = new MockRAMDirectory(); SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); - // Force frequent flushes - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer( + new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp) + .setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<7;i++) { @@ -83,7 +85,9 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase writer.close(); copyFiles(dir, cp); - writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT)) + .setIndexDeletionPolicy(dp)); copyFiles(dir, cp); for(int i=0;i<7;i++) { writer.addDocument(doc); @@ -95,7 +99,9 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase writer.close(); copyFiles(dir, cp); dp.release(); - writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); + writer 
= new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT)) + .setIndexDeletionPolicy(dp)); writer.close(); try { copyFiles(dir, cp); @@ -111,10 +117,10 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase final long stopTime = System.currentTimeMillis() + 1000; SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); - - // Force frequent flushes - writer.setMaxBufferedDocs(2); + final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer( + new StandardAnalyzer(TEST_VERSION_CURRENT)).setIndexDeletionPolicy(dp) + .setMaxBufferedDocs(2)); final Thread t = new Thread() { @Override diff --git a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java index 30602a7e4d1..233fb88d92c 100644 --- a/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java +++ b/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java @@ -27,6 +27,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermPositions; import org.apache.lucene.store.Directory; @@ -37,7 +38,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase { public void testCaching() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); 
TokenStream stream = new TokenStream() { private int index = 0; diff --git a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java index 3986d246456..47220add752 100644 --- a/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java +++ b/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java @@ -24,6 +24,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; import org.apache.lucene.queryParser.QueryParser; @@ -41,9 +42,9 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, - new SimpleAnalyzer(TEST_VERSION_CURRENT), - true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new SimpleAnalyzer( + TEST_VERSION_CURRENT))); Document doc = new Document(); doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED)); @@ -70,7 +71,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase { public void testMutipleDocument() throws Exception { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(doc); diff --git a/src/test/org/apache/lucene/collation/CollationTestBase.java 
b/src/test/org/apache/lucene/collation/CollationTestBase.java index 935234b795c..642d9ad713f 100644 --- a/src/test/org/apache/lucene/collation/CollationTestBase.java +++ b/src/test/org/apache/lucene/collation/CollationTestBase.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.PerFieldAnalyzerWrapper; import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; @@ -69,8 +70,7 @@ public class CollationTestBase extends LuceneTestCase { String firstEnd, String secondBeg, String secondEnd) throws Exception { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter writer = new IndexWriter - (ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED)); @@ -101,8 +101,7 @@ public class CollationTestBase extends LuceneTestCase { String firstEnd, String secondBeg, String secondEnd) throws Exception { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter writer = new IndexWriter - (ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi @@ -125,13 +124,11 @@ public class CollationTestBase extends LuceneTestCase { searcher.close(); } - public void testFarsiTermRangeQuery - (Analyzer analyzer, String firstBeg, String firstEnd, - String secondBeg, String secondEnd) throws Exception { + public void testFarsiTermRangeQuery(Analyzer analyzer, String firstBeg, + 
String firstEnd, String secondBeg, String secondEnd) throws Exception { RAMDirectory farsiIndex = new RAMDirectory(); - IndexWriter writer = new IndexWriter - (farsiIndex, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, Field.Index.ANALYZED)); @@ -178,8 +175,7 @@ public class CollationTestBase extends LuceneTestCase { analyzer.addAnalyzer("France", franceAnalyzer); analyzer.addAnalyzer("Sweden", swedenAnalyzer); analyzer.addAnalyzer("Denmark", denmarkAnalyzer); - IndexWriter writer = new IndexWriter - (indexStore, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); // document data: // the tracer field is used to determine which document was hit diff --git a/src/test/org/apache/lucene/document/TestBinaryDocument.java b/src/test/org/apache/lucene/document/TestBinaryDocument.java index 759036da441..1e4d45d35b2 100644 --- a/src/test/org/apache/lucene/document/TestBinaryDocument.java +++ b/src/test/org/apache/lucene/document/TestBinaryDocument.java @@ -2,9 +2,9 @@ package org.apache.lucene.document; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.store.MockRAMDirectory; /** @@ -27,8 +27,7 @@ import org.apache.lucene.store.MockRAMDirectory; /** * Tests {@link Document} class. 
*/ -public class TestBinaryDocument extends LuceneTestCase -{ +public class TestBinaryDocument extends LuceneTestCase { String binaryValStored = "this text will be stored as a byte array in the index"; String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"; @@ -58,7 +57,7 @@ public class TestBinaryDocument extends LuceneTestCase /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.addDocument(doc); writer.close(); @@ -83,9 +82,7 @@ public class TestBinaryDocument extends LuceneTestCase dir.close(); } - public void testCompressionTools() - throws Exception - { + public void testCompressionTools() throws Exception { Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed)); @@ -96,7 +93,7 @@ public class TestBinaryDocument extends LuceneTestCase /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.addDocument(doc); writer.close(); diff --git a/src/test/org/apache/lucene/document/TestDocument.java b/src/test/org/apache/lucene/document/TestDocument.java index ad7cd25e279..6cc20059740 100644 --- a/src/test/org/apache/lucene/document/TestDocument.java +++ b/src/test/org/apache/lucene/document/TestDocument.java @@ -2,6 +2,7 @@ package org.apache.lucene.document; import org.apache.lucene.analysis.standard.StandardAnalyzer; import 
org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -151,10 +152,11 @@ public class TestDocument extends LuceneTestCase * * @throws Exception on error */ - public void testGetValuesForIndexedDocument() throws Exception - { + public void testGetValuesForIndexedDocument() throws Exception { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); writer.addDocument(makeDocumentWithFields()); writer.close(); @@ -225,7 +227,9 @@ public class TestDocument extends LuceneTestCase doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED)); RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); writer.addDocument(doc); field.setValue("id2"); writer.addDocument(doc); diff --git a/src/test/org/apache/lucene/index/DocHelper.java b/src/test/org/apache/lucene/index/DocHelper.java index 234c8740dec..1f2aa348195 100644 --- a/src/test/org/apache/lucene/index/DocHelper.java +++ b/src/test/org/apache/lucene/index/DocHelper.java @@ -232,10 +232,9 @@ class DocHelper { * @param doc * @throws IOException */ - public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException - { - IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); - writer.setSimilarity(similarity); + public static 
SegmentInfo writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc) throws IOException { + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setSimilarity(similarity)); //writer.setUseCompoundFile(false); writer.addDocument(doc); writer.commit(); diff --git a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java index 9a387c86934..3a1e6badf70 100755 --- a/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java +++ b/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java @@ -20,9 +20,9 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; @@ -39,27 +39,28 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { IndexWriter writer = null; - writer = newWriter(dir, true); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); // add 100 documents addDocs(writer, 100); assertEquals(100, writer.maxDoc()); writer.close(); - writer = newWriter(aux, true); - writer.setUseCompoundFile(false); // use one without a compound file + writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file // add 40 documents in separate files addDocs(writer, 40); assertEquals(40, writer.maxDoc()); writer.close(); - writer = 
newWriter(aux2, true); + writer = newWriter(aux2, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE)); // add 40 documents in compound files addDocs2(writer, 50); assertEquals(50, writer.maxDoc()); writer.close(); // test doc count before segments are merged - writer = newWriter(dir, false); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); assertEquals(100, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux, aux2 }); assertEquals(190, writer.maxDoc()); @@ -73,14 +74,14 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { // now add another set in. Directory aux3 = new RAMDirectory(); - writer = newWriter(aux3, true); + writer = newWriter(aux3, new IndexWriterConfig(TEST_VERSION_CURRENT)); // add 40 documents addDocs(writer, 40); assertEquals(40, writer.maxDoc()); writer.close(); // test doc count before segments are merged/index is optimized - writer = newWriter(dir, false); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); assertEquals(190, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux3 }); assertEquals(230, writer.maxDoc()); @@ -94,7 +95,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { verifyTermDocs(dir, new Term("content", "bbb"), 50); // now optimize it. 
- writer = newWriter(dir, false); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.optimize(); writer.close(); @@ -107,11 +108,11 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { // now add a single document Directory aux4 = new RAMDirectory(); - writer = newWriter(aux4, true); + writer = newWriter(aux4, new IndexWriterConfig(TEST_VERSION_CURRENT)); addDocs2(writer, 1); writer.close(); - writer = newWriter(dir, false); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); assertEquals(230, writer.maxDoc()); writer.addIndexesNoOptimize(new Directory[] { aux4 }); assertEquals(231, writer.maxDoc()); @@ -129,7 +130,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { Directory aux = new RAMDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); + IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.addIndexesNoOptimize(new Directory[] {aux}); // Adds 10 docs, then replaces them with another 10 @@ -166,7 +167,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { Directory aux = new RAMDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); + IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: @@ -205,7 +206,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { Directory aux = new RAMDirectory(); setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); + IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: @@ -246,25 +247,25 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { IndexWriter writer 
= null; - writer = newWriter(dir, true); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); // add 100 documents addDocs(writer, 100); assertEquals(100, writer.maxDoc()); writer.close(); - writer = newWriter(aux, true); - writer.setUseCompoundFile(false); // use one without a compound file - writer.setMaxBufferedDocs(1000); + writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file // add 140 documents in separate files addDocs(writer, 40); writer.close(); - writer = newWriter(aux, true); - writer.setUseCompoundFile(false); // use one without a compound file - writer.setMaxBufferedDocs(1000); + writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file addDocs(writer, 100); writer.close(); - writer = newWriter(dir, false); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); try { // cannot add self writer.addIndexesNoOptimize(new Directory[] { aux, dir }); @@ -290,9 +291,10 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(4); + IndexWriter writer = newWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs( + 10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); addDocs(writer, 10); writer.addIndexesNoOptimize(new Directory[] { aux }); 
@@ -314,9 +316,8 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); - writer.setMaxBufferedDocs(9); - writer.setMergeFactor(4); + IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(9)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); addDocs(writer, 2); writer.addIndexesNoOptimize(new Directory[] { aux }); @@ -338,9 +339,10 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { setUpDirs(dir, aux); - IndexWriter writer = newWriter(dir, false); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(4); + IndexWriter writer = newWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs( + 10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); assertEquals(1060, writer.maxDoc()); @@ -367,9 +369,10 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { assertEquals(10, reader.numDocs()); reader.close(); - IndexWriter writer = newWriter(dir, false); - writer.setMaxBufferedDocs(4); - writer.setMergeFactor(4); + IndexWriter writer = newWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND) + .setMaxBufferedDocs(4)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) }); assertEquals(1020, writer.maxDoc()); @@ -390,9 +393,10 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { setUpDirs(dir, aux); - IndexWriter writer = newWriter(aux2, true); - writer.setMaxBufferedDocs(100); - writer.setMergeFactor(10); + IndexWriter writer = newWriter(aux2, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs( + 100)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); 
writer.addIndexesNoOptimize(new Directory[] { aux }); assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); @@ -412,9 +416,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { assertEquals(22, reader.numDocs()); reader.close(); - writer = newWriter(dir, false); - writer.setMaxBufferedDocs(6); - writer.setMergeFactor(4); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(6)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); writer.addIndexesNoOptimize(new Directory[] { aux, aux2 }); assertEquals(1025, writer.maxDoc()); @@ -425,9 +429,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { verifyNumDocs(dir, 1025); } - private IndexWriter newWriter(Directory dir, boolean create) + private IndexWriter newWriter(Directory dir, IndexWriterConfig conf) throws IOException { - final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED); + final IndexWriter writer = new IndexWriter(dir, conf); writer.setMergePolicy(new LogDocMergePolicy(writer)); return writer; } @@ -471,26 +475,25 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { private void setUpDirs(Directory dir, Directory aux) throws IOException { IndexWriter writer = null; - writer = newWriter(dir, true); - writer.setMaxBufferedDocs(1000); + writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000)); // add 1000 documents in 1 segment addDocs(writer, 1000); assertEquals(1000, writer.maxDoc()); assertEquals(1, writer.getSegmentCount()); writer.close(); - writer = newWriter(aux, true); - writer.setUseCompoundFile(false); // use one without a compound file - writer.setMaxBufferedDocs(100); - writer.setMergeFactor(10); + writer = newWriter(aux, new 
IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); // add 30 documents in 3 segments for (int i = 0; i < 3; i++) { addDocs(writer, 10); writer.close(); - writer = newWriter(aux, false); - writer.setUseCompoundFile(false); // use one without a compound file - writer.setMaxBufferedDocs(100); - writer.setMergeFactor(10); + writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(100)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); } assertEquals(30, writer.maxDoc()); assertEquals(3, writer.getSegmentCount()); @@ -501,18 +504,19 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { public void testHangOnClose() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMergePolicy(new LogByteSizeMergePolicy(writer)); - writer.setMaxBufferedDocs(5); - writer.setUseCompoundFile(false); - writer.setMergeFactor(100); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(5)); + LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); + lmp.setMergeFactor(100); + writer.setMergePolicy(lmp); Document doc = new Document(); doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg 
hhh iii", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<60;i++) writer.addDocument(doc); - writer.setMaxBufferedDocs(200); + Document doc2 = new Document(); doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, Field.Index.NO)); @@ -527,13 +531,13 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { writer.close(); Directory dir2 = new MockRAMDirectory(); - writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer); + writer = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT).setMergeScheduler(new SerialMergeScheduler())); + lmp = new LogByteSizeMergePolicy(writer); lmp.setMinMergeMB(0.0001); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); + lmp.setMergeFactor(4); writer.setMergePolicy(lmp); - writer.setMergeFactor(4); - writer.setUseCompoundFile(false); - writer.setMergeScheduler(new SerialMergeScheduler()); writer.addIndexesNoOptimize(new Directory[] {dir}); writer.close(); dir.close(); @@ -544,14 +548,16 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase { // is respected when copying tail segments public void testTargetCFS() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = newWriter(dir, true); - writer.setUseCompoundFile(false); + IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); addDocs(writer, 1); writer.close(); Directory other = new RAMDirectory(); - writer = newWriter(other, true); - writer.setUseCompoundFile(true); + writer = newWriter(other, new IndexWriterConfig(TEST_VERSION_CURRENT)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true); + ((LogMergePolicy) 
writer.getMergePolicy()).setUseCompoundDocStore(true); writer.addIndexesNoOptimize(new Directory[] {dir}); assertTrue(writer.newestSegment().getUseCompoundFile()); writer.close(); diff --git a/src/test/org/apache/lucene/index/TestAtomicUpdate.java b/src/test/org/apache/lucene/index/TestAtomicUpdate.java index 570a1c2b63c..04692a04917 100644 --- a/src/test/org/apache/lucene/index/TestAtomicUpdate.java +++ b/src/test/org/apache/lucene/index/TestAtomicUpdate.java @@ -19,20 +19,19 @@ package org.apache.lucene.index; import org.apache.lucene.util.*; import org.apache.lucene.store.*; import org.apache.lucene.document.*; -import org.apache.lucene.analysis.*; import java.util.Random; import java.io.File; import java.io.IOException; public class TestAtomicUpdate extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); - private Random RANDOM; + + private static final class MockIndexWriter extends IndexWriter { - public class MockIndexWriter extends IndexWriter { + static Random RANDOM; - public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { - super(dir, a, create, mfl); + public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } @Override @@ -126,9 +125,8 @@ public class TestAtomicUpdate extends LuceneTestCase { TimedThread[] threads = new TimedThread[4]; - IndexWriter writer = new MockIndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(7); - writer.setMergeFactor(3); + IndexWriter writer = new MockIndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(7)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3); // Establish a base index of 100 docs: for(int i=0;i<100;i++) { @@ -183,7 +181,7 @@ public class TestAtomicUpdate extends LuceneTestCase { FSDirectory. 
*/ public void testAtomicUpdates() throws Exception { - RANDOM = newRandom(); + MockIndexWriter.RANDOM = newRandom(); Directory directory; // First in a RAM directory: diff --git a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index b174e9532b7..cbb5a58f2e9 100644 --- a/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -32,12 +32,12 @@ import java.util.ArrayList; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelectorResult; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; @@ -52,8 +52,7 @@ import org.apache.lucene.util._TestUtil; against it, and add documents to it. 
*/ -public class TestBackwardsCompatibility extends LuceneTestCase -{ +public class TestBackwardsCompatibility extends LuceneTestCase { // Uncomment these cases & run them on an older Lucene // version, to generate an index to test backwards @@ -215,7 +214,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase hasTested29++; } - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); w.optimize(); w.close(); @@ -355,7 +354,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase Directory dir = FSDirectory.open(new File(dirName)); // open writer - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); // add 10 docs for(int i=0;i<10;i++) { @@ -399,7 +398,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase searcher.close(); // optimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.optimize(); writer.close(); @@ -449,7 +448,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase searcher.close(); // optimize - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.optimize(); writer.close(); @@ -471,9 +470,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase dirName = fullDir(dirName); Directory dir = FSDirectory.open(new File(dirName)); - IndexWriter writer = new 
IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(doCFS); - writer.setMaxBufferedDocs(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(doCFS); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(doCFS); for(int i=0;i<35;i++) { addDoc(writer, i); @@ -482,9 +481,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase writer.close(); // open fresh writer so we get no prx file in the added segment - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(doCFS); - writer.setMaxBufferedDocs(10); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(doCFS); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(doCFS); addNoProxDoc(writer); writer.close(); @@ -509,8 +508,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase try { Directory dir = FSDirectory.open(new File(fullDir(outputDir))); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setRAMBufferSizeMB(16.0); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); for(int i=0;i<35;i++) { addDoc(writer, i); } diff --git a/src/test/org/apache/lucene/index/TestCheckIndex.java b/src/test/org/apache/lucene/index/TestCheckIndex.java index ce629019285..865c695501d 100644 --- a/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.MockRAMDirectory; -import 
org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.util.Constants; @@ -34,9 +33,7 @@ public class TestCheckIndex extends LuceneTestCase { public void testDeletedDocs() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<19;i++) { diff --git a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java index fb71e424c3e..33ae792b1c3 100644 --- a/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java +++ b/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java @@ -17,20 +17,17 @@ package org.apache.lucene.index; * limitations under the License. 
*/ -import org.apache.lucene.analysis.SimpleAnalyzer; -import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; public class TestConcurrentMergeScheduler extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); - private static class FailOnlyOnFlush extends MockRAMDirectory.Failure { boolean doFail; boolean hitExc; @@ -68,10 +65,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { FailOnlyOnFlush failure = new FailOnlyOnFlush(); directory.failOn(failure); - IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); @@ -115,9 +109,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { RAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); LogDocMergePolicy mp = new LogDocMergePolicy(writer); writer.setMergePolicy(mp); @@ -157,12 +149,10 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { RAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new 
IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); for(int iter=0;iter<7;iter++) { - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); for(int j=0;j<21;j++) { Document doc = new Document(); @@ -174,7 +164,9 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles"); // Reopen - writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND) + .setMaxBufferedDocs(2)); } writer.close(); @@ -189,13 +181,10 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); - IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100); for(int iter=0;iter<10;iter++) { - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(100); for(int j=0;j<201;j++) { idField.setValue(Integer.toString(iter*201+j)); @@ -210,7 +199,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { // Force a bunch of merge threads to kick off so we // stress out aborting them on close: - writer.setMergeFactor(3); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3); writer.addDocument(doc); writer.commit(); @@ -221,7 +210,8 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase { reader.close(); // 
Reopen - writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100); } writer.close(); diff --git a/src/test/org/apache/lucene/index/TestCrash.java b/src/test/org/apache/lucene/index/TestCrash.java index e5b873628a7..63b1d1076d1 100644 --- a/src/test/org/apache/lucene/index/TestCrash.java +++ b/src/test/org/apache/lucene/index/TestCrash.java @@ -20,7 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.document.Document; @@ -35,10 +34,8 @@ public class TestCrash extends LuceneTestCase { private IndexWriter initIndex(MockRAMDirectory dir) throws IOException { dir.setLockFactory(NoLockFactory.getNoLockFactory()); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - //writer.setMaxBufferedDocs(2); - writer.setMaxBufferedDocs(10); - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions(); Document doc = new Document(); doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED)); @@ -51,7 +48,7 @@ public class TestCrash extends LuceneTestCase { private void crash(final IndexWriter writer) throws IOException { final MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory(); - ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) writer.getMergeScheduler(); + ConcurrentMergeScheduler cms = 
(ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler(); dir.crash(); cms.sync(); dir.clearCrash(); diff --git a/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/src/test/org/apache/lucene/index/TestDeletionPolicy.java index af47daab120..88ab6ca506c 100644 --- a/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -23,9 +23,9 @@ import java.util.List; import java.util.Set; import java.util.Collection; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -40,8 +40,8 @@ import org.apache.lucene.util.LuceneTestCase; against it, and add documents to it. */ -public class TestDeletionPolicy extends LuceneTestCase -{ +public class TestDeletionPolicy extends LuceneTestCase { + private void verifyCommitOrder(List commits) throws IOException { final IndexCommit firstCommit = commits.get(0); long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName()); @@ -201,8 +201,10 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setUseCompoundFile(useCompoundFile); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setIndexDeletionPolicy(policy)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); writer.close(); long lastDeleteTime = 0; @@ -210,8 +212,11 @@ public class 
TestDeletionPolicy extends LuceneTestCase // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setUseCompoundFile(useCompoundFile); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)); + lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); } @@ -271,17 +276,22 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(10); - writer.setUseCompoundFile(useCompoundFile); - writer.setMergeScheduler(new SerialMergeScheduler()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setIndexDeletionPolicy(policy) + .setMaxBufferedDocs(10).setMergeScheduler(new SerialMergeScheduler())); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); for(int i=0;i<107;i++) { addDoc(writer); } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setUseCompoundFile(useCompoundFile); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)); + lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); writer.optimize(); writer.close(); @@ -318,7 +328,9 @@ public 
class TestDeletionPolicy extends LuceneTestCase // Open & close a writer and assert that it // actually removed something: int preCount = dir.listAll().length; - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND) + .setIndexDeletionPolicy(policy)); writer.close(); int postCount = dir.listAll().length; assertTrue(postCount < preCount); @@ -340,8 +352,9 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new MockRAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setIndexDeletionPolicy(policy) + .setMaxBufferedDocs(2)); for(int i=0;i<10;i++) { addDoc(writer); if ((1+i)%2 == 0) @@ -359,7 +372,7 @@ public class TestDeletionPolicy extends LuceneTestCase assertTrue(lastCommit != null); // Now add 1 doc and optimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setIndexDeletionPolicy(policy)); addDoc(writer); assertEquals(11, writer.numDocs()); writer.optimize(); @@ -368,7 +381,8 @@ public class TestDeletionPolicy extends LuceneTestCase assertEquals(7, IndexReader.listCommits(dir).size()); // Now open writer on the commit just before optimize: - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit)); assertEquals(10, writer.numDocs()); // Should undo our rollback: @@ 
-380,7 +394,8 @@ public class TestDeletionPolicy extends LuceneTestCase assertEquals(11, r.numDocs()); r.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setIndexDeletionPolicy(policy).setIndexCommit(lastCommit)); assertEquals(10, writer.numDocs()); // Commits the rollback: writer.close(); @@ -396,7 +411,7 @@ public class TestDeletionPolicy extends LuceneTestCase r.close(); // Reoptimize - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setIndexDeletionPolicy(policy)); writer.optimize(); writer.close(); @@ -407,7 +422,7 @@ public class TestDeletionPolicy extends LuceneTestCase // Now open writer on the commit just before optimize, // but this time keeping only the last commit: - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setIndexCommit(lastCommit)); assertEquals(10, writer.numDocs()); // Reader still sees optimized index, because writer @@ -443,16 +458,22 @@ public class TestDeletionPolicy extends LuceneTestCase Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(10); - writer.setUseCompoundFile(useCompoundFile); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE) + .setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + 
lmp.setUseCompoundDocStore(useCompoundFile); for(int i=0;i<107;i++) { addDoc(writer); } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setUseCompoundFile(useCompoundFile); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)); + lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(useCompoundFile); + lmp.setUseCompoundDocStore(useCompoundFile); writer.optimize(); writer.close(); @@ -486,9 +507,12 @@ public class TestDeletionPolicy extends LuceneTestCase KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N); for(int j=0;j lastFlushCount); lastFlushCount = flushCount; writer.setRAMBufferSizeMB(0.000001); - writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); } else if (j < 20) { assertTrue(flushCount > lastFlushCount); lastFlushCount = flushCount; } else if (20 == j) { writer.setRAMBufferSizeMB(16); - writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); lastFlushCount = flushCount; } else if (j < 30) { assertEquals(flushCount, lastFlushCount); } else if (30 == j) { writer.setRAMBufferSizeMB(0.000001); - writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); } else if (j < 40) { assertTrue(flushCount> lastFlushCount); lastFlushCount = flushCount; } else if (40 == j) { writer.setMaxBufferedDocs(10); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); lastFlushCount = flushCount; } else if (j < 50) { assertEquals(flushCount, lastFlushCount); writer.setMaxBufferedDocs(10); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + 
writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); } else if (50 == j) { assertTrue(flushCount > lastFlushCount); } @@ -1259,12 +1263,15 @@ public class TestIndexWriter extends LuceneTestCase { dir.close(); } + /** + * @deprecated after setters on IW go away, this test can be deleted because + * changing those settings on IW won't be possible. + */ public void testChangingRAMBuffer2() throws IOException { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMaxBufferedDeleteTerms(10); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms( + 10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)); for(int j=1;j<52;j++) { Document doc = new Document(); @@ -1292,25 +1299,25 @@ public class TestIndexWriter extends LuceneTestCase { lastFlushCount = flushCount; } else if (20 == j) { writer.setRAMBufferSizeMB(16); - writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH); lastFlushCount = flushCount; } else if (j < 30) { assertEquals(flushCount, lastFlushCount); } else if (30 == j) { writer.setRAMBufferSizeMB(0.000001); - writer.setMaxBufferedDeleteTerms(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH); writer.setMaxBufferedDeleteTerms(1); } else if (j < 40) { assertTrue(flushCount> lastFlushCount); lastFlushCount = flushCount; } else if (40 == j) { writer.setMaxBufferedDeleteTerms(10); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); lastFlushCount = flushCount; } else if (j < 50) { assertEquals(flushCount, lastFlushCount); 
writer.setMaxBufferedDeleteTerms(10); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); } else if (50 == j) { assertTrue(flushCount > lastFlushCount); } @@ -1321,8 +1328,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testDiverseDocs() throws IOException { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setRAMBufferSizeMB(0.5); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setRAMBufferSizeMB(0.5)); Random rand = newRandom(); for(int i=0;i<3;i++) { // First, docs where every term is unique (heavy on @@ -1370,8 +1376,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testEnablingNorms() throws IOException { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); // Enable norms for only 1 doc, pre flush for(int j=0;j<10;j++) { Document doc = new Document(); @@ -1391,8 +1396,8 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(10, hits.length); searcher.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10)); // Enable norms for only 1 doc, post flush for(int j=0;j<27;j++) { Document doc = new Document(); @@ -1417,8 +1422,8 @@ public class TestIndexWriter extends LuceneTestCase { public void testHighFreqTerm() throws IOException { RAMDirectory dir = new 
RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, new IndexWriter.MaxFieldLength(100000000)); - writer.setRAMBufferSizeMB(0.01); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxFieldLength(100000000).setRAMBufferSizeMB(0.01)); // Massive doc that has 128 K a's StringBuilder b = new StringBuilder(1024*1024); for(int i=0;i<4096;i++) { @@ -1464,7 +1469,8 @@ public class TestIndexWriter extends LuceneTestCase { } Directory dir = new MyRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT)); for (int i = 0; i < 100; i++) { addDoc(writer); } @@ -1475,7 +1481,8 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals("did not get right number of hits", 100, hits.length); writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.CREATE)); writer.close(); dir.close(); @@ -1483,8 +1490,8 @@ public class TestIndexWriter extends LuceneTestCase { public void testFlushWithNoMerging() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<19;i++) @@ -1502,7 +1509,7 @@ public class TestIndexWriter extends LuceneTestCase { // empty doc (no norms) and flush public void 
testEmptyDocAfterFlushingRealDoc() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); writer.addDocument(doc); @@ -1521,12 +1528,12 @@ public class TestIndexWriter extends LuceneTestCase { Directory dir = new MockRAMDirectory(); for(int pass=0;pass<2;pass++) { - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMergeScheduler(new ConcurrentMergeScheduler()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE) + .setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(101); Document doc = new Document(); doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(101); for(int i=0;i<200;i++) writer.addDocument(doc); writer.optimize(false); @@ -1575,11 +1582,10 @@ public class TestIndexWriter extends LuceneTestCase { */ public void testBadSegment() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter ir = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document document = new Document(); - document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED, - Field.TermVector.YES)); + document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES)); ir.addDocument(document); ir.close(); dir.close(); @@ -1588,7 
+1594,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1008 public void testNoTermVectorAfterTermVector() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document document = new Document(); document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); @@ -1614,7 +1620,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1010 public void testNoTermVectorAfterTermVectorMerge() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document document = new Document(); document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); @@ -1646,12 +1652,11 @@ public class TestIndexWriter extends LuceneTestCase { int pri = Thread.currentThread().getPriority(); try { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) iw.getMergePolicy()).setMergeFactor(2); Document document = new Document(); document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); - iw.setMaxBufferedDocs(2); - iw.setMergeFactor(2); Thread.currentThread().setPriority(Thread.MAX_PRIORITY); for(int i=0;i<4;i++) iw.addDocument(document); @@ -1686,11 +1691,12 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1013 public void 
testSetMaxMergeDocs() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - iw.setMergeScheduler(new MyMergeScheduler()); - iw.setMaxMergeDocs(20); - iw.setMaxBufferedDocs(2); - iw.setMergeFactor(2); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMergeScheduler(new MyMergeScheduler()) + .setMaxBufferedDocs(2)); + LogMergePolicy lmp = (LogMergePolicy) iw.getMergePolicy(); + lmp.setMaxMergeDocs(20); + lmp.setMergeFactor(2); Document document = new Document(); document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); @@ -1702,7 +1708,8 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1072 public void testExceptionFromTokenStream() throws IOException { RAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new Analyzer() { + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT); + conf.setAnalyzer(new Analyzer() { @Override public TokenStream tokenStream(String fieldName, Reader reader) { @@ -1719,7 +1726,8 @@ public class TestIndexWriter extends LuceneTestCase { }; } - }, true, IndexWriter.MaxFieldLength.LIMITED); + }); + IndexWriter writer = new IndexWriter(dir, conf); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; @@ -1804,8 +1812,7 @@ public class TestIndexWriter extends LuceneTestCase { failure.setDoFail(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; doc.add(new Field("content", contents, Field.Store.NO, @@ 
-1860,7 +1867,7 @@ public class TestIndexWriter extends LuceneTestCase { for(int i=0;i<2;i++) { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); //writer.setInfoStream(System.out); Document doc = new Document(); doc.add(new Field("contents", "here are some contents", Field.Store.YES, @@ -1903,8 +1910,8 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(1, numDel); - writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setAnalyzer(analyzer).setMaxBufferedDocs(10)); doc = new Document(); doc.add(new Field("contents", "here are some contents", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -1948,7 +1955,7 @@ public class TestIndexWriter extends LuceneTestCase { MockRAMDirectory dir = new MockRAMDirectory(); { - final IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); final int finalI = i; @@ -2017,8 +2024,8 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(NUM_THREAD*NUM_ITER, numDel); - IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxBufferedDocs(10)); Document doc = new Document(); doc.add(new Field("contents", "here are some contents", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -2051,10 +2058,11 @@ public class TestIndexWriter extends LuceneTestCase { MockRAMDirectory dir = 
new MockRAMDirectory(); int delID = 0; for(int i=0;i<20;i++) { - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(2); - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setMergeFactor(2); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; @@ -2087,8 +2095,10 @@ public class TestIndexWriter extends LuceneTestCase { reader.close(); if (0 == i % 4) { - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(false); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); + LogMergePolicy lmp2 = (LogMergePolicy) writer.getMergePolicy(); + lmp2.setUseCompoundFile(false); + lmp2.setUseCompoundDocStore(false); writer.optimize(); writer.close(); } @@ -2104,21 +2114,18 @@ public class TestIndexWriter extends LuceneTestCase { for(int pass=0;pass<2;pass++) { - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - + IndexWriterConfig conf = new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE) + .setMaxBufferedDocs(2); + if (pass == 2) { + conf.setMergeScheduler(new SerialMergeScheduler()); + } + IndexWriter writer = new IndexWriter(directory, conf); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100); + //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2)); for(int iter=0;iter<10;iter++) { //System.out.println("TEST: iter=" + iter); - MergeScheduler ms; - if (pass == 1) - ms = new ConcurrentMergeScheduler(); - else - ms = new 
SerialMergeScheduler(); - - writer.setMergeScheduler(ms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(100); - for(int j=0;j<199;j++) { idField.setValue(Integer.toString(iter*201+j)); writer.addDocument(doc); @@ -2132,7 +2139,7 @@ public class TestIndexWriter extends LuceneTestCase { // Force a bunch of merge threads to kick off so we // stress out aborting them on close: - writer.setMergeFactor(2); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); final IndexWriter finalWriter = writer; final ArrayList failure = new ArrayList(); @@ -2176,7 +2183,7 @@ public class TestIndexWriter extends LuceneTestCase { reader.close(); // Reopen - writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); } writer.close(); } @@ -2256,15 +2263,11 @@ public class TestIndexWriter extends LuceneTestCase { for(int iter=0;iter<7;iter++) { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10); // We expect AlreadyClosedException - cms.setSuppressExceptions(); - - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(4); + ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer = new IndexWriter(dir, conf); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); IndexerThread[] threads = new IndexerThread[NUM_THREADS]; @@ -2315,9 +2318,8 @@ public class TestIndexWriter extends LuceneTestCase { // OK: public void testImmediateDiskFull() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = 
new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); dir.setMaxSizeInBytes(dir.getRecomputedActualSizeInBytes()); - writer.setMaxBufferedDocs(2); final Document doc = new Document(); doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); try { @@ -2353,13 +2355,11 @@ public class TestIndexWriter extends LuceneTestCase { for(int iter=0;iter<10;iter++) { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2); // We expect disk full exceptions in the merge threads - cms.setSuppressExceptions(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(4); + ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer = new IndexWriter(dir, conf); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); dir.setMaxSizeInBytes(4*1024+20*iter); IndexerThread[] threads = new IndexerThread[NUM_THREADS]; @@ -2414,8 +2414,7 @@ public class TestIndexWriter extends LuceneTestCase { public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); final Document doc = new Document(); doc.add(new Field("field", "aaa bbb ccc ddd eee fff 
ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -2444,13 +2443,11 @@ public class TestIndexWriter extends LuceneTestCase { for(int iter=0;iter<2;iter++) { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2); // We expect disk full exceptions in the merge threads - cms.setSuppressExceptions(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(4); + ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer = new IndexWriter(dir, conf); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(4); IndexerThread[] threads = new IndexerThread[NUM_THREADS]; @@ -2604,7 +2601,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testUnlimitedMaxFieldLength() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); StringBuilder b = new StringBuilder(); @@ -2628,7 +2625,7 @@ public class TestIndexWriter extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); // add 100 documents for (int i = 0; i < 100; i++) { @@ -2664,9 +2661,8 @@ public class TestIndexWriter extends LuceneTestCase { public void testForceCommit() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new 
IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(5); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); for (int i = 0; i < 23; i++) addDoc(writer); @@ -2718,13 +2714,9 @@ public class TestIndexWriter extends LuceneTestCase { FailOnlyInSync failure = new FailOnlyInSync(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); failure.setDoFail(); - - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(5); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); for (int i = 0; i < 23; i++) { addDoc(writer); @@ -2737,7 +2729,7 @@ public class TestIndexWriter extends LuceneTestCase { } } - cms.sync(); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); assertTrue(failure.didFail); failure.clearDoFail(); writer.close(); @@ -2753,11 +2745,10 @@ public class TestIndexWriter extends LuceneTestCase { Directory dir = new MockRAMDirectory(); for(int iter=0;iter<2;iter++) { - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); - writer.setMergeScheduler(new SerialMergeScheduler()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler( + new SerialMergeScheduler())); writer.setMergePolicy(new 
LogDocMergePolicy(writer)); Document document = new Document(); @@ -2786,11 +2777,10 @@ public class TestIndexWriter extends LuceneTestCase { } reader.close(); - writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); - writer.setMergeScheduler(new SerialMergeScheduler()); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler( + new SerialMergeScheduler())); writer.setMergePolicy(new LogDocMergePolicy(writer)); Directory[] indexDirs = {new MockRAMDirectory(dir)}; @@ -2805,11 +2795,10 @@ public class TestIndexWriter extends LuceneTestCase { public void testTermVectorCorruption2() throws IOException { Directory dir = new MockRAMDirectory(); for(int iter=0;iter<2;iter++) { - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); - writer.setMergeScheduler(new SerialMergeScheduler()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler( + new SerialMergeScheduler())); writer.setMergePolicy(new LogDocMergePolicy(writer)); Document document = new Document(); @@ -2842,12 +2831,10 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1168 public void testTermVectorCorruption3() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); - writer.setMergeScheduler(new 
SerialMergeScheduler()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler( + new SerialMergeScheduler())); writer.setMergePolicy(new LogDocMergePolicy(writer)); Document document = new Document(); @@ -2864,12 +2851,10 @@ public class TestIndexWriter extends LuceneTestCase { writer.addDocument(document); writer.close(); - writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); - writer.setMergeScheduler(new SerialMergeScheduler()); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH).setMergeScheduler( + new SerialMergeScheduler())); writer.setMergePolicy(new LogDocMergePolicy(writer)); for(int i=0;i<6;i++) writer.addDocument(document); @@ -2890,7 +2875,8 @@ public class TestIndexWriter extends LuceneTestCase { public void testUserSpecifiedMaxFieldLength() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new IndexWriter.MaxFieldLength(100000)); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxFieldLength(100000)); Document doc = new Document(); StringBuilder b = new StringBuilder(); @@ -2912,11 +2898,9 @@ public class TestIndexWriter extends LuceneTestCase { // are required public void testExpungeDeletes() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + IndexWriter writer = new 
IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH)); Document document = new Document(); @@ -2940,9 +2924,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(8, ir.numDocs()); ir.close(); - writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); assertEquals(8, writer.numDocs()); assertEquals(10, writer.maxDoc()); writer.expungeDeletes(); @@ -2958,12 +2940,10 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-325: test expungeDeletes, when many adjacent merges are required public void testExpungeDeletes2() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(50); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(50); Document document = new Document(); @@ -2987,10 +2967,8 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(49, ir.numDocs()); ir.close(); - writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMergeFactor(3); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3); assertEquals(49, writer.numDocs()); writer.expungeDeletes(); writer.close(); @@ -3005,12 +2983,10 @@ public class TestIndexWriter extends LuceneTestCase { // many adjacent merges are required public void 
testExpungeDeletes3() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(50); - writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2).setRAMBufferSizeMB( + IndexWriterConfig.DISABLE_AUTO_FLUSH)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(50); Document document = new Document(); @@ -3034,11 +3010,9 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(49, ir.numDocs()); ir.close(); - writer = new IndexWriter(dir, - new StandardAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); // Force many merges to happen - writer.setMergeFactor(3); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(3); writer.expungeDeletes(false); writer.close(); ir = IndexReader.open(dir, true); @@ -3051,7 +3025,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1179 public void testEmptyFieldName() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc); @@ -3059,10 +3033,10 @@ public class TestIndexWriter extends LuceneTestCase { } // LUCENE-1198 - public class MockIndexWriter extends IndexWriter { + private static final class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - 
super(dir, a, create, mfl); + public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } boolean doFail; @@ -3074,10 +3048,11 @@ public class TestIndexWriter extends LuceneTestCase { return true; } } + public void testExceptionDocumentsWriterInit() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3097,8 +3072,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1208 public void testExceptionJustBeforeFlush() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - w.setMaxBufferedDocs(2); + MockIndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3125,10 +3099,10 @@ public class TestIndexWriter extends LuceneTestCase { dir.close(); } - public class MockIndexWriter2 extends IndexWriter { + private static final class MockIndexWriter2 extends IndexWriter { - public MockIndexWriter2(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, a, create, mfl); + public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } boolean doFail; @@ -3143,15 +3117,14 @@ public class TestIndexWriter extends LuceneTestCase { return true; } } + // LUCENE-1210 public void testExceptionOnMergeInit() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - 
MockIndexWriter2 w = new MockIndexWriter2(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - w.setMaxBufferedDocs(2); - w.setMergeFactor(2); + MockIndexWriter2 w = new MockIndexWriter2(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2); w.doFail = true; - w.setMergeScheduler(new ConcurrentMergeScheduler()); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3162,16 +3135,16 @@ public class TestIndexWriter extends LuceneTestCase { break; } - ((ConcurrentMergeScheduler) w.getMergeScheduler()).sync(); + ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync(); assertTrue(w.failed); w.close(); dir.close(); } - public class MockIndexWriter3 extends IndexWriter { + private static final class MockIndexWriter3 extends IndexWriter { - public MockIndexWriter3(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { - super(dir, a, create, mfl); + public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } boolean afterWasCalled; @@ -3187,11 +3160,12 @@ public class TestIndexWriter extends LuceneTestCase { beforeWasCalled = true; } } + // LUCENE-1222 public void testDoBeforeAfterFlush() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter3 w = new MockIndexWriter3(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + MockIndexWriter3 w = new MockIndexWriter3(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3242,12 +3216,13 @@ public class TestIndexWriter extends LuceneTestCase { } } } + // LUCENE-1214 public void testExceptionsDuringCommit() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); 
FailOnlyInCommit failure = new FailOnlyInCommit(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3295,7 +3270,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-510 public void testInvalidUTF16() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); final int count = utf8Data.length/2; @@ -3508,7 +3483,7 @@ public class TestIndexWriter extends LuceneTestCase { }; MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", tokens)); w.addDocument(doc); @@ -3540,9 +3515,8 @@ public class TestIndexWriter extends LuceneTestCase { public void testPrepareCommit() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(5); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); for (int i = 0; i < 23; i++) addDoc(writer); @@ -3592,10 +3566,8 @@ public class TestIndexWriter extends LuceneTestCase { MockRAMDirectory dir = new MockRAMDirectory(); 
dir.setPreventDoubleWrite(false); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(5); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); for (int i = 0; i < 23; i++) addDoc(writer); @@ -3617,7 +3589,7 @@ public class TestIndexWriter extends LuceneTestCase { reader.close(); reader2.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < 17; i++) addDoc(writer); @@ -3645,7 +3617,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testPrepareCommitNoChanges() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.prepareCommit(); writer.commit(); writer.close(); @@ -3672,15 +3644,14 @@ public class TestIndexWriter extends LuceneTestCase { public RunAddIndexesThreads(int numCopy) throws Throwable { NUM_COPY = numCopy; dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); for (int i = 0; i < NUM_INIT_DOCS; i++) addDoc(writer); writer.close(); dir2 = new MockRAMDirectory(); - writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - cms = (ConcurrentMergeScheduler) writer2.getMergeScheduler(); + 
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); + cms = (ConcurrentMergeScheduler) writer2.getConfig().getMergeScheduler(); readers = new IndexReader[NUM_COPY]; for(int i=0;i data = new HashMap(); @@ -4021,7 +3991,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals("test1", r.getCommitUserData().get("label")); r.close(); - w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); w.optimize(); w.close(); @@ -4032,17 +4002,17 @@ public class TestIndexWriter extends LuceneTestCase { public void testOptimizeExceptions() throws IOException { RAMDirectory startDir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - w.setMaxBufferedDocs(2); - w.setMergeFactor(100); + IndexWriter w = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) w.getMergePolicy()).setMergeFactor(100); for(int i=0;i<27;i++) addDoc(w); w.close(); for(int i=0;i<200;i++) { MockRAMDirectory dir = new MockRAMDirectory(startDir); - w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - ((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions(); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT); + ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); + w = new IndexWriter(dir, conf); dir.setRandomIOExceptionRate(0.5, 100); try { w.optimize(); @@ -4060,7 +4030,7 @@ public class TestIndexWriter extends LuceneTestCase { final List thrown = new ArrayList(); - final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) { + final IndexWriter writer = new IndexWriter(new 
MockRAMDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT)) { @Override public void message(final String message) { if (message.startsWith("now flush at close") && 0 == thrown.size()) { @@ -4085,7 +4055,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1442 public void testDoubleOffsetCounting() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4120,7 +4090,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1442 public void testDoubleOffsetCounting2() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4142,7 +4112,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionCharAnalyzer() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4165,7 +4135,7 @@ public class TestIndexWriter extends LuceneTestCase { public void 
testEndOffsetPositionWithCachingTokenFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); - IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd "))); Field f = new Field("field", stream, Field.TermVector.WITH_POSITIONS_OFFSETS); @@ -4189,7 +4159,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT); - IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd "))); TokenStream sink = tee.newSinkTokenStream(); @@ -4214,7 +4184,9 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStopFilter() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StopAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT) + .setAnalyzer(new StopAnalyzer(TEST_VERSION_CURRENT))); Document doc = new Document(); Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); doc.add(f); @@ -4236,7 +4208,9 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandard() throws 
Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); Document doc = new Document(); Field f = new Field("field", "abcd the ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); @@ -4266,7 +4240,9 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandardEmptyField() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); Document doc = new Document(); Field f = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); @@ -4293,7 +4269,9 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1448 public void testEndOffsetPositionStandardEmptyField2() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer( + TEST_VERSION_CURRENT))); Document doc = new Document(); Field f = new Field("field", "abcd", Field.Store.NO, @@ -4335,7 +4313,7 @@ public class TestIndexWriter extends LuceneTestCase { out.writeByte((byte) 42); out.close(); - new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED).close(); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)).close(); 
assertTrue(dir.fileExists("myrandomfile")); @@ -4351,8 +4329,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testDeadlock() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -4363,7 +4340,7 @@ public class TestIndexWriter extends LuceneTestCase { // index has 2 segments MockRAMDirectory dir2 = new MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer2.addDocument(doc); writer2.close(); @@ -4401,7 +4378,8 @@ public class TestIndexWriter extends LuceneTestCase { if (w != null) { w.close(); } - w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2); //((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions(); if (!first && !allowInterrupt) { @@ -4410,8 +4388,6 @@ public class TestIndexWriter extends LuceneTestCase { allowInterrupt = true; } - w.setMaxBufferedDocs(2); - w.setMergeFactor(2); Document doc = new Document(); doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED)); for(int i=0;i<100;i++) { @@ -4510,7 +4486,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testIndexStoreCombos() throws Exception { 
MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); byte[] b = new byte[50]; for(int i=0;i<50;i++) b[i] = (byte) (i+77); @@ -4572,7 +4548,7 @@ public class TestIndexWriter extends LuceneTestCase { // LUCENE-1727: make sure doc fields are stored in order public void testStoredFieldsOrder() throws Throwable { Directory d = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO)); doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO)); @@ -4604,7 +4580,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testEmbeddedFFFF() throws Throwable { Directory d = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); @@ -4619,8 +4595,10 @@ public class TestIndexWriter extends LuceneTestCase { public void testNoDocsIndex() throws Throwable { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); 
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); writer.setInfoStream(new PrintStream(bos)); writer.addDocument(new Document()); @@ -4637,7 +4615,7 @@ public class TestIndexWriter extends LuceneTestCase { final int NUM_THREADS = 5; final double RUN_SEC = 0.5; final Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); w.commit(); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[NUM_THREADS]; @@ -4688,7 +4666,7 @@ public class TestIndexWriter extends LuceneTestCase { for(int iter=0;iter<2;iter++) { Directory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 122fe7a9d2c..723da145f68 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -20,7 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Arrays; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.IndexSearcher; @@ -41,10 +40,8 @@ public class TestIndexWriterDelete extends LuceneTestCase { String[] text = { "Amsterdam", "Venice" }; Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, 
IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setUseCompoundFile(true); - modifier.setMaxBufferedDeleteTerms(1); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDeleteTerms(1)); for (int i = 0; i < keywords.length; i++) { Document doc = new Document(); @@ -78,10 +75,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testNonRAMDelete() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2) + .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -113,9 +109,8 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testMaxBufferedDeletes() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDeleteTerms(1); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDeleteTerms(1)); writer.deleteDocuments(new Term("foobar", "1")); writer.deleteDocuments(new Term("foobar", "1")); writer.deleteDocuments(new Term("foobar", "1")); @@ -128,10 +123,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testRAMDeletes() throws IOException { for(int t=0;t<2;t++) { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(4); - modifier.setMaxBufferedDeleteTerms(4); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + 
TEST_VERSION_CURRENT).setMaxBufferedDocs(4) + .setMaxBufferedDeleteTerms(4)); int id = 0; int value = 100; @@ -170,10 +164,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { // test when delete terms apply to both disk and ram segments public void testBothDeletes() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(100); - modifier.setMaxBufferedDeleteTerms(100); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(100) + .setMaxBufferedDeleteTerms(100)); int id = 0; int value = 100; @@ -203,10 +196,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { // test that batched delete terms are flushed together public void testBatchDeletes() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2) + .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -247,10 +239,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { // test deleteAll() public void testDeleteAll() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2) + .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -294,10 +285,9 @@ public class TestIndexWriterDelete 
extends LuceneTestCase { // test rollback of deleteAll() public void testDeleteAllRollback() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2) + .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -332,10 +322,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { // test deleteAll() w/ near real-time reader public void testDeleteAllNRT() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(2) + .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -424,8 +413,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(startDir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(startDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(new Field("id", Integer.toString(i), Field.Store.YES, @@ -447,11 +435,9 @@ public class TestIndexWriterDelete extends LuceneTestCase { while (!done) { MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.setPreventDoubleWrite(false); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 
IndexWriter.MaxFieldLength.UNLIMITED); - - modifier.setMaxBufferedDocs(1000); // use flush or close - modifier.setMaxBufferedDeleteTerms(1000); // use flush or close + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDocs(1000) + .setMaxBufferedDeleteTerms(1000)); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk @@ -653,10 +639,11 @@ public class TestIndexWriterDelete extends LuceneTestCase { String[] text = { "Amsterdam", "Venice" }; MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - modifier.setUseCompoundFile(true); - modifier.setMaxBufferedDeleteTerms(2); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setMaxBufferedDeleteTerms(2)); + LogMergePolicy lmp = (LogMergePolicy) modifier.getMergePolicy(); + lmp.setUseCompoundFile(true); + lmp.setUseCompoundDocStore(true); dir.failOn(failure.reset()); @@ -762,8 +749,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { String[] text = { "Amsterdam", "Venice" }; MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, - new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); dir.failOn(failure.reset()); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java index 5f9e874d7bb..828b7426ef0 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java @@ -24,8 +24,6 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; import 
org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -111,11 +109,11 @@ public class TestIndexWriterExceptions extends LuceneTestCase { ThreadLocal doFail = new ThreadLocal(); - public class MockIndexWriter extends IndexWriter { + private class MockIndexWriter extends IndexWriter { Random r = new java.util.Random(17); - public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, a, create, mfl); + public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } @Override @@ -134,10 +132,9 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptions() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); + MockIndexWriter writer = new MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setRAMBufferSizeMB(0.1)); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions(); //writer.setMaxBufferedDocs(10); - writer.setRAMBufferSizeMB(0.1); if (DEBUG) writer.setInfoStream(System.out); @@ -172,10 +169,9 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptionsThreads() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); + MockIndexWriter writer = new 
MockIndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setRAMBufferSizeMB(0.2)); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions(); //writer.setMaxBufferedDocs(10); - writer.setRAMBufferSizeMB(0.2); if (DEBUG) writer.setInfoStream(System.out); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java index c139792e01a..c1f71c21e9c 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java @@ -22,6 +22,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.FSDirectory; /** @@ -74,10 +75,10 @@ public class TestIndexWriterLockRelease extends LuceneTestCase { public void testIndexWriterLockRelease() throws IOException { FSDirectory dir = FSDirectory.open(this.__test_dir); try { - new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); } catch (FileNotFoundException e) { try { - new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); } catch (FileNotFoundException e1) { } } finally { diff --git a/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java b/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java index 098b3618d8d..f6d97db373a 100755 --- a/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java +++ 
b/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java @@ -19,9 +19,9 @@ package org.apache.lucene.index; import java.io.IOException; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util._TestUtil; @@ -34,9 +34,8 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testNormalCase() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); writer.setMergePolicy(new LogDocMergePolicy(writer)); for (int i = 0; i < 100; i++) { @@ -51,9 +50,8 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testNoOverMerge() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); writer.setMergePolicy(new LogDocMergePolicy(writer)); boolean noOverMerge = false; @@ -73,9 +71,8 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testForceFlush() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, 
IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(10); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); LogDocMergePolicy mp = new LogDocMergePolicy(writer); mp.setMinMergeDocs(100); writer.setMergePolicy(mp); @@ -84,11 +81,11 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { addDoc(writer); writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10)); writer.setMergePolicy(mp); mp.setMinMergeDocs(100); - writer.setMergeFactor(10); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); checkInvariants(writer); } @@ -99,9 +96,8 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void testMergeFactorChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(100); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100); writer.setMergePolicy(new LogDocMergePolicy(writer)); for (int i = 0; i < 250; i++) { @@ -109,7 +105,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { checkInvariants(writer); } - writer.setMergeFactor(5); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); // merge policy only fixes segments on levels where merges // have been triggered, so check invariants after all adds @@ -125,9 +121,8 @@ public class TestIndexWriterMergePolicy extends 
LuceneTestCase { public void testMaxBufferedDocsChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(101); - writer.setMergeFactor(101); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(101)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); // leftmost* segment has 1 doc @@ -139,14 +134,17 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); - writer.setMaxBufferedDocs(101); - writer.setMergeFactor(101); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(101)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); } - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(10); + writer.close(); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10)); + writer.setMergePolicy(new LogDocMergePolicy(writer)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10); // merge policy only fixes segments on levels where merges // have been triggered, so check invariants after all adds @@ -159,7 +157,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { addDoc(writer); } writer.commit(); - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync(); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); writer.commit(); checkInvariants(writer); @@ -170,10 +168,9 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { public void 
testMergeDocCount0() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); writer.setMergePolicy(new LogDocMergePolicy(writer)); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(100); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(100); for (int i = 0; i < 250; i++) { addDoc(writer); @@ -185,17 +182,17 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { reader.deleteDocuments(new Term("content", "aaa")); reader.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10)); writer.setMergePolicy(new LogDocMergePolicy(writer)); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(5); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(5); // merge factor is changed, so check invariants after all adds for (int i = 0; i < 10; i++) { addDoc(writer); } writer.commit(); - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync(); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); writer.commit(); checkInvariants(writer); assertEquals(10, writer.maxDoc()); @@ -211,9 +208,9 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase { private void checkInvariants(IndexWriter writer) throws IOException { _TestUtil.syncConcurrentMerges(writer); - int maxBufferedDocs = writer.getMaxBufferedDocs(); - int mergeFactor = writer.getMergeFactor(); - int maxMergeDocs = writer.getMaxMergeDocs(); + int maxBufferedDocs = writer.getConfig().getMaxBufferedDocs(); + int mergeFactor = ((LogMergePolicy) writer.getMergePolicy()).getMergeFactor(); + int 
maxMergeDocs = ((LogMergePolicy) writer.getMergePolicy()).getMaxMergeDocs(); int ramSegmentCount = writer.getNumBufferedDocuments(); assertTrue(ramSegmentCount < maxBufferedDocs); diff --git a/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index e6e30b5ba01..4ad9a46380d 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -20,6 +20,7 @@ import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; @@ -56,8 +57,8 @@ public class TestIndexWriterMerging extends LuceneTestCase Directory merged = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMergeFactor(2); + IndexWriter writer = new IndexWriter(merged, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT))); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); writer.addIndexesNoOptimize(new Directory[]{indexA, indexB}); writer.optimize(); @@ -90,12 +91,13 @@ public class TestIndexWriterMerging extends LuceneTestCase return fail; } - private void fillIndex(Directory dir, int start, int numDocs) throws IOException - { + private void fillIndex(Directory dir, int start, int numDocs) throws IOException { - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMergeFactor(2); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer( + new 
StandardAnalyzer(TEST_VERSION_CURRENT)) + .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); for (int i = start; i < (start + numDocs); i++) { diff --git a/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 4ada6871424..1f63794f465 100644 --- a/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.List; import java.util.Random; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; @@ -75,8 +74,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); // create the index createIndexNoClose(!optimize, "index1", writer); @@ -110,8 +108,7 @@ public class TestIndexWriterReader extends LuceneTestCase { assertEquals(0, count(new Term("id", id10), r3)); assertEquals(1, count(new Term("id", Integer.toString(8000)), r3)); - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc); @@ -138,8 +135,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = false; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - 
IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); @@ -147,8 +143,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // create a 2nd index Directory dir2 = new MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer2.setInfoStream(infoStream); createIndexNoClose(!optimize, "index2", writer2); writer2.close(); @@ -185,14 +180,12 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = false; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); // create a 2nd index Directory dir2 = new MockRAMDirectory(); - IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer2.setInfoStream(infoStream); createIndexNoClose(!optimize, "index2", writer2); writer2.close(); @@ -220,8 +213,7 @@ public class TestIndexWriterReader extends LuceneTestCase { boolean optimize = true; Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); @@ -259,8 +251,7 @@ public class 
TestIndexWriterReader extends LuceneTestCase { writer.close(); // reopen the writer to verify the delete made it to the directory - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); IndexReader w2r1 = writer.getReader(); assertEquals(0, count(new Term("id", id10), w2r1)); @@ -274,8 +265,7 @@ public class TestIndexWriterReader extends LuceneTestCase { int numDirs = 3; Directory mainDir = new MockRAMDirectory(); - IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter mainWriter = new IndexWriter(mainDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); mainWriter.setInfoStream(infoStream); AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter); addDirThreads.launchThreads(numDirs); @@ -318,9 +308,7 @@ public class TestIndexWriterReader extends LuceneTestCase { this.numDirs = numDirs; this.mainWriter = mainWriter; addDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(addDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); for (int i = 0; i < NUM_INIT_DOCS; i++) { Document doc = createDocument(i, "addindex", 4); writer.addDocument(doc); @@ -426,8 +414,7 @@ public class TestIndexWriterReader extends LuceneTestCase { */ public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); 
writer.setInfoStream(infoStream); IndexReader r1 = writer.getReader(); assertEquals(0, r1.maxDoc()); @@ -464,8 +451,7 @@ public class TestIndexWriterReader extends LuceneTestCase { writer.close(); // test whether the changes made it to the directory - writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); IndexReader w2r1 = writer.getReader(); // insure the deletes were actually flushed to the directory assertEquals(200, w2r1.maxDoc()); @@ -504,8 +490,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public static void createIndex(Directory dir1, String indexName, boolean multiSegment) throws IOException { - IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); w.setMergePolicy(new LogDocMergePolicy(w)); for (int i = 0; i < 100; i++) { w.addDocument(createDocument(i, indexName, 4)); @@ -539,8 +524,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testMergeWarmer() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); writer.setInfoStream(infoStream); // create the index @@ -552,13 +536,12 @@ public class TestIndexWriterReader extends LuceneTestCase { // Enroll warmer MyWarmer warmer = new MyWarmer(); writer.setMergedSegmentWarmer(warmer); - writer.setMergeFactor(2); - writer.setMaxBufferedDocs(2); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); for (int i = 0; i < 10; i++) { writer.addDocument(createDocument(i, "test", 4)); } - ((ConcurrentMergeScheduler) 
writer.getMergeScheduler()).sync(); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); assertTrue(warmer.warmCount > 0); final int count = warmer.warmCount; @@ -574,8 +557,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testAfterCommit() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); // create the index @@ -591,7 +573,7 @@ public class TestIndexWriterReader extends LuceneTestCase { for (int i = 0; i < 10; i++) { writer.addDocument(createDocument(i, "test", 4)); } - ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync(); + ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync(); IndexReader r2 = r1.reopen(); if (r2 != r1) { @@ -607,8 +589,7 @@ public class TestIndexWriterReader extends LuceneTestCase { // Make sure reader remains usable even if IndexWriter closes public void testAfterClose() throws Exception { Directory dir1 = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); // create the index @@ -637,10 +618,9 @@ public class TestIndexWriterReader extends LuceneTestCase { // Stress test reopen during addIndexes public void testDuringAddIndexes() throws Exception { Directory dir1 = new MockRAMDirectory(); - final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); - writer.setMergeFactor(2); 
+ ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); // create the index createIndexNoClose(false, "test", writer); @@ -715,10 +695,9 @@ public class TestIndexWriterReader extends LuceneTestCase { // Stress test reopen during add/delete public void testDuringAddDelete() throws Exception { Directory dir1 = new MockRAMDirectory(); - final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); writer.setInfoStream(infoStream); - writer.setMergeFactor(2); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); // create the index createIndexNoClose(false, "test", writer); @@ -796,8 +775,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testExpungeDeletes() throws Throwable { Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED); @@ -821,8 +799,7 @@ public class TestIndexWriterReader extends LuceneTestCase { public void testDeletesNumDocs() throws Throwable { Directory dir = new MockRAMDirectory(); - final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED)); Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED); diff --git a/src/test/org/apache/lucene/index/TestLazyBug.java 
b/src/test/org/apache/lucene/index/TestLazyBug.java index c3fae95685b..639d9ac5ba5 100755 --- a/src/test/org/apache/lucene/index/TestLazyBug.java +++ b/src/test/org/apache/lucene/index/TestLazyBug.java @@ -17,14 +17,21 @@ package org.apache.lucene.index; * limitations under the License. */ -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.SimpleAnalyzer; -import org.apache.lucene.document.*; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import java.util.Set; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorResult; +import org.apache.lucene.document.Fieldable; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; - -import java.util.*; +import org.apache.lucene.util.LuceneTestCase; /** @@ -63,10 +70,10 @@ public class TestLazyBug extends LuceneTestCase { Directory dir = new RAMDirectory(); try { Random r = newRandom(); - Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); - - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); for (int d = 1; d <= NUM_DOCS; d++) { Document doc = new Document(); diff --git a/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/src/test/org/apache/lucene/index/TestLazyProxSkipping.java index c48b38140e4..7a570bb25b4 100755 --- a/src/test/org/apache/lucene/index/TestLazyProxSkipping.java +++ b/src/test/org/apache/lucene/index/TestLazyProxSkipping.java @@ -19,7 +19,6 @@ package 
org.apache.lucene.index; import java.io.IOException; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.search.IndexSearcher; @@ -60,9 +59,9 @@ public class TestLazyProxSkipping extends LuceneTestCase { int numDocs = 500; Directory directory = new SeekCountingDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(false); - writer.setMaxBufferedDocs(10); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); String content; @@ -118,7 +117,7 @@ public class TestLazyProxSkipping extends LuceneTestCase { public void testSeek() throws IOException { Directory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < 10; i++) { Document doc = new Document(); doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 8acf219b52a..cf03e004043 100644 --- a/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ b/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -44,8 +44,7 @@ import org.apache.lucene.util.LuceneTestCase; public class TestMultiLevelSkipList extends LuceneTestCase { public void testSimpleSkip() throws IOException { RAMDirectory dir = new 
RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new PayloadAnalyzer())); Term term = new Term("test", "a"); for (int i = 0; i < 5000; i++) { Document d1 = new Document(); diff --git a/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java b/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java index 46b634cb912..f2a391539f3 100644 --- a/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java +++ b/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java @@ -19,7 +19,6 @@ package org.apache.lucene.index; import java.util.Random; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.TestIndexWriterReader.HeavyAtomicInt; import org.apache.lucene.store.Directory; @@ -32,13 +31,12 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { public void testIndexing() throws Exception { Directory mainDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(mainDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); IndexReader reader = writer.getReader(); // start pooling readers reader.close(); - writer.setMergeFactor(2); - writer.setMaxBufferedDocs(10); RunThread[] indexThreads = new RunThread[4]; for (int x=0; x < indexThreads.length; x++) { indexThreads[x] = new RunThread(x % 2, writer); diff --git a/src/test/org/apache/lucene/index/TestNorms.java 
b/src/test/org/apache/lucene/index/TestNorms.java index 6b8e762628d..a06de218f71 100755 --- a/src/test/org/apache/lucene/index/TestNorms.java +++ b/src/test/org/apache/lucene/index/TestNorms.java @@ -26,6 +26,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; import org.apache.lucene.document.Field.Store; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.Similarity; import org.apache.lucene.store.Directory; @@ -99,9 +100,10 @@ public class TestNorms extends LuceneTestCase { Directory dir3 = new RAMDirectory(); createIndex(dir3); - IndexWriter iw = new IndexWriter(dir3,anlzr,false, IndexWriter.MaxFieldLength.LIMITED); - iw.setMaxBufferedDocs(5); - iw.setMergeFactor(3); + IndexWriter iw = new IndexWriter(dir3, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(anlzr).setOpenMode(OpenMode.APPEND) + .setMaxBufferedDocs(5)); + ((LogMergePolicy) iw.getMergePolicy()).setMergeFactor(3); iw.addIndexesNoOptimize(new Directory[]{dir1,dir2}); iw.optimize(); iw.close(); @@ -117,9 +119,9 @@ public class TestNorms extends LuceneTestCase { doTestNorms(dir3); // now with optimize - iw = new IndexWriter(dir3,anlzr,false, IndexWriter.MaxFieldLength.LIMITED); - iw.setMaxBufferedDocs(5); - iw.setMergeFactor(3); + iw = new IndexWriter(dir3, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setOpenMode(OpenMode.APPEND).setAnalyzer(anlzr).setMaxBufferedDocs(5)); + ((LogMergePolicy) iw.getMergePolicy()).setMergeFactor(3); iw.optimize(); iw.close(); verifyIndex(dir3); @@ -143,11 +145,13 @@ public class TestNorms extends LuceneTestCase { } private void createIndex(Directory dir) throws IOException { - IndexWriter iw = new IndexWriter(dir,anlzr,true, IndexWriter.MaxFieldLength.LIMITED); - iw.setMaxBufferedDocs(5); - iw.setMergeFactor(3); - iw.setSimilarity(similarityOne); - iw.setUseCompoundFile(true); + 
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setAnalyzer(anlzr) + .setMaxBufferedDocs(5).setSimilarity(similarityOne)); + LogMergePolicy lmp = (LogMergePolicy) iw.getMergePolicy(); + lmp.setMergeFactor(3); + lmp.setUseCompoundFile(true); + lmp.setUseCompoundDocStore(true); iw.close(); } @@ -185,11 +189,13 @@ public class TestNorms extends LuceneTestCase { } private void addDocs(Directory dir, int ndocs, boolean compound) throws IOException { - IndexWriter iw = new IndexWriter(dir,anlzr,false, IndexWriter.MaxFieldLength.LIMITED); - iw.setMaxBufferedDocs(5); - iw.setMergeFactor(3); - iw.setSimilarity(similarityOne); - iw.setUseCompoundFile(compound); + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND).setAnalyzer(anlzr) + .setMaxBufferedDocs(5).setSimilarity(similarityOne)); + LogMergePolicy lmp = (LogMergePolicy) iw.getMergePolicy(); + lmp.setMergeFactor(3); + lmp.setUseCompoundFile(compound); + lmp.setUseCompoundDocStore(compound); for (int i = 0; i < ndocs; i++) { iw.addDocument(newDoc()); } diff --git a/src/test/org/apache/lucene/index/TestOmitTf.java b/src/test/org/apache/lucene/index/TestOmitTf.java index 6d029b6be9f..57bd0ca9e15 100644 --- a/src/test/org/apache/lucene/index/TestOmitTf.java +++ b/src/test/org/apache/lucene/index/TestOmitTf.java @@ -67,7 +67,7 @@ public class TestOmitTf extends LuceneTestCase { public void testOmitTermFreqAndPositions() throws Exception { Directory ram = new MockRAMDirectory(); Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document d = new Document(); // this field will have Tf @@ -113,9 +113,9 @@ public class TestOmitTf extends LuceneTestCase { public void testMixedMerge() 
throws Exception { Directory ram = new MockRAMDirectory(); Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(3); - writer.setMergeFactor(2); + IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxBufferedDocs(3)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); Document d = new Document(); // this field will have Tf @@ -166,9 +166,9 @@ public class TestOmitTf extends LuceneTestCase { public void testMixedRAM() throws Exception { Directory ram = new MockRAMDirectory(); Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(10); - writer.setMergeFactor(2); + IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxBufferedDocs(10)); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); Document d = new Document(); // this field will have Tf @@ -214,10 +214,12 @@ public class TestOmitTf extends LuceneTestCase { public void testNoPrxFile() throws Throwable { Directory ram = new MockRAMDirectory(); Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMaxBufferedDocs(3); - writer.setMergeFactor(2); - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxBufferedDocs(3)); + LogMergePolicy lmp = (LogMergePolicy) writer.getMergePolicy(); + lmp.setMergeFactor(2); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); Document d = new Document(); Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, 
Field.Index.ANALYZED); @@ -245,10 +247,10 @@ public class TestOmitTf extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = new MockRAMDirectory(); Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); - writer.setMergeFactor(2); - writer.setMaxBufferedDocs(2); - writer.setSimilarity(new SimpleSimilarity()); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer).setMaxBufferedDocs(2) + .setSimilarity(new SimpleSimilarity())); + ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(2); StringBuilder sb = new StringBuilder(265); diff --git a/src/test/org/apache/lucene/index/TestParallelReader.java b/src/test/org/apache/lucene/index/TestParallelReader.java index 62973174d2f..6d1704176cc 100644 --- a/src/test/org/apache/lucene/index/TestParallelReader.java +++ b/src/test/org/apache/lucene/index/TestParallelReader.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.MapFieldSelector; @@ -106,7 +105,7 @@ public class TestParallelReader extends LuceneTestCase { // one document only: Directory dir2 = new MockRAMDirectory(); - IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document d3 = new Document(); d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED)); w2.addDocument(d3); @@ -151,13 +150,13 @@ public class TestParallelReader extends LuceneTestCase { Directory dir2 = getDir2(); // add another document to ensure that the indexes are not optimized - IndexWriter modifier = 
new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter modifier = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document d = new Document(); d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); modifier.addDocument(d); modifier.close(); - modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); d = new Document(); d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED)); modifier.addDocument(d); @@ -170,7 +169,7 @@ public class TestParallelReader extends LuceneTestCase { assertFalse(pr.isOptimized()); pr.close(); - modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); modifier.optimize(); modifier.close(); @@ -182,7 +181,7 @@ public class TestParallelReader extends LuceneTestCase { pr.close(); - modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + modifier = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); modifier.optimize(); modifier.close(); @@ -233,7 +232,7 @@ public class TestParallelReader extends LuceneTestCase { // Fields 1-4 indexed together: private Searcher single() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document d1 = new Document(); d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED)); @@ -263,7 +262,7 @@ public class TestParallelReader extends LuceneTestCase { private 
Directory getDir1() throws IOException { Directory dir1 = new MockRAMDirectory(); - IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w1 = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document d1 = new Document(); d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED)); d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED)); @@ -278,7 +277,7 @@ public class TestParallelReader extends LuceneTestCase { private Directory getDir2() throws IOException { Directory dir2 = new RAMDirectory(); - IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter w2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document d3 = new Document(); d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED)); d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java b/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java index 70d421ae535..14f014d1192 100644 --- a/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java +++ b/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java @@ -22,13 +22,12 @@ import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.Field.TermVector; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.RAMDirectory; @@ -47,16 +46,14 @@ 
public class TestParallelReaderEmptyIndex extends LuceneTestCase { */ public void testEmptyIndex() throws IOException { RAMDirectory rd1 = new MockRAMDirectory(); - IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(rd1, new IndexWriterConfig(TEST_VERSION_CURRENT)); iw.close(); RAMDirectory rd2 = new MockRAMDirectory(rd1); RAMDirectory rdOut = new MockRAMDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.UNLIMITED); + IndexWriter iwOut = new IndexWriter(rdOut, new IndexWriterConfig(TEST_VERSION_CURRENT)); ParallelReader pr = new ParallelReader(); pr.add(IndexReader.open(rd1,true)); pr.add(IndexReader.open(rd2,true)); @@ -80,8 +77,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { public void testEmptyIndexWithVectors() throws IOException { RAMDirectory rd1 = new MockRAMDirectory(); { - IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(rd1, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("test", "", Store.NO, Index.ANALYZED, TermVector.YES)); @@ -95,16 +91,14 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { ir.deleteDocument(0); ir.close(); - iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), false, - MaxFieldLength.UNLIMITED); + iw = new IndexWriter(rd1, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); iw.optimize(); iw.close(); } RAMDirectory rd2 = new MockRAMDirectory(); { - IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.UNLIMITED); + IndexWriter iw = new IndexWriter(rd2, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); iw.addDocument(doc); iw.close(); @@ -112,8 +106,7 @@ public class 
TestParallelReaderEmptyIndex extends LuceneTestCase { RAMDirectory rdOut = new MockRAMDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.UNLIMITED); + IndexWriter iwOut = new IndexWriter(rdOut, new IndexWriterConfig(TEST_VERSION_CURRENT)); ParallelReader pr = new ParallelReader(); pr.add(IndexReader.open(rd1,true)); pr.add(IndexReader.open(rd2,true)); diff --git a/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/src/test/org/apache/lucene/index/TestParallelTermEnum.java index d6cf47e8586..9c017dddfc9 100755 --- a/src/test/org/apache/lucene/index/TestParallelTermEnum.java +++ b/src/test/org/apache/lucene/index/TestParallelTermEnum.java @@ -20,7 +20,6 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Index; @@ -37,7 +36,7 @@ public class TestParallelTermEnum extends LuceneTestCase { Document doc; RAMDirectory rd1 = new RAMDirectory(); - IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw1 = new IndexWriter(rd1, new IndexWriterConfig(TEST_VERSION_CURRENT)); doc = new Document(); doc.add(new Field("field1", "the quick brown fox jumps", Store.YES, @@ -49,7 +48,7 @@ public class TestParallelTermEnum extends LuceneTestCase { iw1.close(); RAMDirectory rd2 = new RAMDirectory(); - IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw2 = new IndexWriter(rd2, new IndexWriterConfig(TEST_VERSION_CURRENT)); doc = new Document(); doc.add(new Field("field0", "", Store.NO, Index.ANALYZED)); diff --git a/src/test/org/apache/lucene/index/TestPayloads.java 
b/src/test/org/apache/lucene/index/TestPayloads.java index c6383cc2b56..7b33ebbbd65 100644 --- a/src/test/org/apache/lucene/index/TestPayloads.java +++ b/src/test/org/apache/lucene/index/TestPayloads.java @@ -30,12 +30,12 @@ import java.util.Random; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.analysis.WhitespaceTokenizer; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.RAMDirectory; @@ -100,7 +100,7 @@ public class TestPayloads extends LuceneTestCase { rnd = newRandom(); Directory ram = new RAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); - IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(ram, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document d = new Document(); // this field won't have any payloads d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED)); @@ -127,7 +127,8 @@ public class TestPayloads extends LuceneTestCase { // now we add another document which has payloads for field f3 and verify if the SegmentMerger // enabled payloads for that field - writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(ram, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setAnalyzer(analyzer).setOpenMode(OpenMode.CREATE)); d = new Document(); d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED)); d.add(new 
Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED)); @@ -168,7 +169,9 @@ public class TestPayloads extends LuceneTestCase { // different tests to verify the payload encoding private void performTest(Directory dir) throws Exception { PayloadAnalyzer analyzer = new PayloadAnalyzer(); - IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer(analyzer) + .setOpenMode(OpenMode.CREATE)); // should be in sync with value in TermInfosWriter final int skipInterval = 16; @@ -305,7 +308,8 @@ public class TestPayloads extends LuceneTestCase { // test long payload analyzer = new PayloadAnalyzer(); - writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setAnalyzer(analyzer).setOpenMode(OpenMode.CREATE)); String singleTerm = "lucene"; d = new Document(); @@ -465,7 +469,7 @@ public class TestPayloads extends LuceneTestCase { final ByteArrayPool pool = new ByteArrayPool(numThreads, 5); Directory dir = new RAMDirectory(); - final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); final String field = "test"; Thread[] ingesters = new Thread[numThreads]; diff --git a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java index 067103d1493..fb24176b528 100644 --- a/src/test/org/apache/lucene/index/TestSegmentTermDocs.java +++ b/src/test/org/apache/lucene/index/TestSegmentTermDocs.java @@ -21,7 +21,6 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; import 
org.apache.lucene.store.Directory; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -99,8 +98,7 @@ public class TestSegmentTermDocs extends LuceneTestCase { public void testSkipTo(int indexDivisor) throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Term ta = new Term("content","aaa"); for(int i = 0; i < 10; i++) diff --git a/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/src/test/org/apache/lucene/index/TestSegmentTermEnum.java index 6f657c57d6f..18e79c53e98 100644 --- a/src/test/org/apache/lucene/index/TestSegmentTermEnum.java +++ b/src/test/org/apache/lucene/index/TestSegmentTermEnum.java @@ -20,23 +20,22 @@ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.MockRAMDirectory; -public class TestSegmentTermEnum extends LuceneTestCase -{ +public class TestSegmentTermEnum extends LuceneTestCase { + Directory dir = new RAMDirectory(); - public void testTermEnum() throws IOException - { + public void testTermEnum() throws IOException { IndexWriter writer = null; - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); // ADD 100 documents with term : aaa // add 100 documents with terms: aaa bbb @@ -52,7 +51,7 @@ public class 
TestSegmentTermEnum extends LuceneTestCase verifyDocFreq(); // merge segments by optimizing the index - writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setOpenMode(OpenMode.APPEND)); writer.optimize(); writer.close(); @@ -63,7 +62,7 @@ public class TestSegmentTermEnum extends LuceneTestCase public void testPrevTermAtEnd() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); addDoc(writer, "aaa bbb"); writer.close(); SegmentReader reader = SegmentReader.getOnlySegmentReader(dir); diff --git a/src/test/org/apache/lucene/index/TestStressIndexing.java b/src/test/org/apache/lucene/index/TestStressIndexing.java index 978bfb76346..75959d3fd9c 100644 --- a/src/test/org/apache/lucene/index/TestStressIndexing.java +++ b/src/test/org/apache/lucene/index/TestStressIndexing.java @@ -19,14 +19,13 @@ package org.apache.lucene.index; import org.apache.lucene.util.*; import org.apache.lucene.store.*; import org.apache.lucene.document.*; -import org.apache.lucene.analysis.*; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.*; import java.util.Random; import java.io.File; public class TestStressIndexing extends LuceneTestCase { - private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT); private Random RANDOM; private static abstract class TimedThread extends Thread { @@ -118,15 +117,13 @@ public class TestStressIndexing extends LuceneTestCase { stress test. 
*/ public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception { - IndexWriter modifier = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - - modifier.setMaxBufferedDocs(10); + IndexWriter modifier = new IndexWriter(directory, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs( + 10).setMergeScheduler(mergeScheduler)); TimedThread[] threads = new TimedThread[4]; int numThread = 0; - if (mergeScheduler != null) - modifier.setMergeScheduler(mergeScheduler); // One modifier that writes 10 docs then removes 5, over // and over: diff --git a/src/test/org/apache/lucene/index/TestStressIndexing2.java b/src/test/org/apache/lucene/index/TestStressIndexing2.java index ba0096f8007..8c168c25a83 100644 --- a/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -14,20 +14,30 @@ package org.apache.lucene.index; * limitations under the License. 
*/ -import org.apache.lucene.store.*; -import org.apache.lucene.document.*; -import org.apache.lucene.analysis.*; - -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util._TestUtil; -import org.apache.lucene.util.StringHelper; -import org.apache.lucene.search.TermQuery; - -import java.util.*; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; import junit.framework.Assert; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.MockRAMDirectory; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util._TestUtil; + public class TestStressIndexing2 extends LuceneTestCase { static int maxFields=4; static int bigFieldSize=10; @@ -40,8 +50,8 @@ public class TestStressIndexing2 extends LuceneTestCase { public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { - super(dir, a, create, mfl); + public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); } @Override @@ -123,9 +133,13 @@ public class TestStressIndexing2 extends LuceneTestCase { public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); - IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - 
w.setUseCompoundFile(false); - + IndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB( + 0.1).setMaxBufferedDocs(maxBufferedDocs)); + LogMergePolicy lmp = (LogMergePolicy) w.getMergePolicy(); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); + lmp.setMergeFactor(mergeFactor); /*** w.setMaxMergeDocs(Integer.MAX_VALUE); w.setMaxFieldLength(10000); @@ -133,11 +147,6 @@ public class TestStressIndexing2 extends LuceneTestCase { w.setMergeFactor(10); ***/ - // force many merges - w.setMergeFactor(mergeFactor); - w.setRAMBufferSizeMB(.1); - w.setMaxBufferedDocs(maxBufferedDocs); - threads = new IndexingThread[nThreads]; for (int i=0; i indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); for(int iter=0;iter<3;iter++) { - IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED); - w.setUseCompoundFile(false); - - // force many merges - w.setMergeFactor(mergeFactor); - w.setRAMBufferSizeMB(.1); - w.setMaxBufferedDocs(maxBufferedDocs); + IndexWriter w = new MockIndexWriter(dir, new IndexWriterConfig( + TEST_VERSION_CURRENT).setOpenMode(OpenMode.CREATE) + .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs)); + LogMergePolicy lmp = (LogMergePolicy) w.getMergePolicy(); + lmp.setUseCompoundFile(false); + lmp.setUseCompoundDocStore(false); + lmp.setMergeFactor(mergeFactor); threads = new IndexingThread[nThreads]; for (int i=0; i docs, Directory dir) throws IOException { - IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); // index all docs in a single thread Iterator iter = docs.values().iterator(); @@ -409,7 +418,6 @@ public class TestStressIndexing2 extends 
LuceneTestCase { Fieldable f2 = ff2.get(i); if (f1.isBinary()) { assert(f2.isBinary()); - //TODO } else { String s1 = f1.stringValue(); String s2 = f2.stringValue(); diff --git a/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/src/test/org/apache/lucene/index/TestTermVectorsReader.java index 1fd2188ba17..b9a8ad34194 100644 --- a/src/test/org/apache/lucene/index/TestTermVectorsReader.java +++ b/src/test/org/apache/lucene/index/TestTermVectorsReader.java @@ -92,8 +92,9 @@ public class TestTermVectorsReader extends LuceneTestCase { } Arrays.sort(tokens); - IndexWriter writer = new IndexWriter(dir, new MyAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - writer.setUseCompoundFile(false); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(new MyAnalyzer())); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(false); + ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundDocStore(false); Document doc = new Document(); for(int i=0;i data = new HashMap(); data.put("index", "Rolled back to 1-"+id); w.commit(data); @@ -127,7 +126,7 @@ public class TestTransactionRollback extends LuceneTestCase { //Build index, of records 1 to 100, committing after each batch of 10 IndexDeletionPolicy sdp=new KeepAllDeletionPolicy(); - IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),sdp,MaxFieldLength.UNLIMITED); + IndexWriter w=new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setIndexDeletionPolicy(sdp)); for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) { Document doc=new Document(); doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED)); @@ -195,9 +194,8 @@ public class TestTransactionRollback extends LuceneTestCase { for(int i=0;i<2;i++) { // Unless you specify a prior commit point, rollback // should not work: - new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT), - new 
DeleteLastCommitPolicy(), - MaxFieldLength.UNLIMITED).close(); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT) + .setIndexDeletionPolicy(new DeleteLastCommitPolicy())).close(); IndexReader r = IndexReader.open(dir, true); assertEquals(100, r.numDocs()); r.close(); diff --git a/src/test/org/apache/lucene/index/TestTransactions.java b/src/test/org/apache/lucene/index/TestTransactions.java index 1e0aefb620d..dc4ff9f29cc 100644 --- a/src/test/org/apache/lucene/index/TestTransactions.java +++ b/src/test/org/apache/lucene/index/TestTransactions.java @@ -19,13 +19,16 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.Random; -import org.apache.lucene.store.*; -import org.apache.lucene.util.*; -import org.apache.lucene.analysis.*; -import org.apache.lucene.document.*; -public class TestTransactions extends LuceneTestCase -{ +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.MockRAMDirectory; +import org.apache.lucene.util.English; +import org.apache.lucene.util.LuceneTestCase; + +public class TestTransactions extends LuceneTestCase { + private Random RANDOM; private static volatile boolean doFail; @@ -88,17 +91,15 @@ public class TestTransactions extends LuceneTestCase @Override public void doWork() throws Throwable { - IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); - writer1.setMaxBufferedDocs(3); - writer1.setMergeFactor(2); - ((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer1 = new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(3)); + ((LogMergePolicy) writer1.getMergePolicy()).setMergeFactor(2); + ((ConcurrentMergeScheduler) writer1.getConfig().getMergeScheduler()).setSuppressExceptions(); - IndexWriter writer2 = new IndexWriter(dir2, new 
WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); // Intentionally use different params so flush/merge // happen @ different times - writer2.setMaxBufferedDocs(2); - writer2.setMergeFactor(3); - ((ConcurrentMergeScheduler) writer2.getMergeScheduler()).setSuppressExceptions(); + IndexWriter writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT).setMaxBufferedDocs(2)); + ((LogMergePolicy) writer2.getMergePolicy()).setMergeFactor(3); + ((ConcurrentMergeScheduler) writer2.getConfig().getMergeScheduler()).setSuppressExceptions(); update(writer1); update(writer2); @@ -178,7 +179,7 @@ public class TestTransactions extends LuceneTestCase } public void initIndex(Directory dir) throws Throwable { - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT)); for(int j=0; j<7; j++) { Document d = new Document(); int n = RANDOM.nextInt(); diff --git a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java index 824af0e717d..9f3ded5dc28 100644 --- a/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java @@ -28,6 +28,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -283,7 +284,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStopWordSearching() throws Exception { Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT); Directory ramDir 
= new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(analyzer)); Document doc = new Document(); doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED)); iw.addDocument(doc); diff --git a/src/test/org/apache/lucene/queryParser/TestQueryParser.java b/src/test/org/apache/lucene/queryParser/TestQueryParser.java index 9dc3889f956..3adc0b2dbe7 100644 --- a/src/test/org/apache/lucene/queryParser/TestQueryParser.java +++ b/src/test/org/apache/lucene/queryParser/TestQueryParser.java @@ -46,6 +46,7 @@ import org.apache.lucene.document.DateTools; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanQuery; @@ -472,8 +473,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testFarsiRangeCollating() throws Exception { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("content","\u0633\u0627\u0628", Field.Store.YES, Field.Index.NOT_ANALYZED)); @@ -882,7 +882,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testLocalDateFormat() throws IOException, ParseException { RAMDirectory ramDir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir, new IndexWriterConfig(TEST_VERSION_CURRENT)); addDateDoc("a", 
2005, 12, 2, 10, 15, 33, iw); addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw); iw.close(); @@ -1028,7 +1028,7 @@ public class TestQueryParser extends LocalizedTestCase { public void testPositionIncrements() throws Exception { Directory dir = new MockRAMDirectory(); Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT); - IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT).setAnalyzer(a)); Document doc = new Document(); doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED)); w.addDocument(doc); diff --git a/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index 29a09fb896b..b1c9e5f0c3f 100644 --- a/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ b/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -24,6 +24,8 @@ import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.RAMDirectory; public class BaseTestRangeFilter extends LuceneTestCase { @@ -96,8 +98,10 @@ public class BaseTestRangeFilter extends LuceneTestCase { try { /* build an index */ - IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(TEST_VERSION_CURRENT), T, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(index.index, new IndexWriterConfig( + TEST_VERSION_CURRENT).setAnalyzer( + new SimpleAnalyzer(TEST_VERSION_CURRENT)) + .setOpenMode(OpenMode.CREATE)); for (int d = minId; d <= maxId; d++) { Document doc = new Document(); diff --git a/src/test/org/apache/lucene/search/QueryUtils.java b/src/test/org/apache/lucene/search/QueryUtils.java index 7116500e4dc..04eb87df954 
100644 --- a/src/test/org/apache/lucene/search/QueryUtils.java +++ b/src/test/org/apache/lucene/search/QueryUtils.java @@ -8,12 +8,11 @@ import java.io.ObjectOutputStream; import junit.framework.Assert; -import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.store.RAMDirectory; import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT; @@ -200,8 +199,7 @@ public class QueryUtils { private static RAMDirectory makeEmptyIndex(final int numDeletedDocs) throws IOException { RAMDirectory d = new RAMDirectory(); - IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, - MaxFieldLength.LIMITED); + IndexWriter w = new IndexWriter(d, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < numDeletedDocs; i++) { w.addDocument(new Document()); } diff --git a/src/test/org/apache/lucene/search/TestBoolean2.java b/src/test/org/apache/lucene/search/TestBoolean2.java index 35eb209e915..a3ec45b5a60 100644 --- a/src/test/org/apache/lucene/search/TestBoolean2.java +++ b/src/test/org/apache/lucene/search/TestBoolean2.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; import org.apache.lucene.queryParser.ParseException; @@ -50,7 +51,7 @@ public class TestBoolean2 extends LuceneTestCase { protected void setUp() throws Exception { super.setUp(); RAMDirectory directory = new RAMDirectory(); - IndexWriter writer= new IndexWriter(directory, new 
WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT)); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED)); @@ -67,14 +68,14 @@ public class TestBoolean2 extends LuceneTestCase { int docCount = 0; do { final Directory copy = new RAMDirectory(dir2); - IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); w.addIndexesNoOptimize(new Directory[] {copy}); docCount = w.maxDoc(); w.close(); mulFactor *= 2; } while(docCount < 3000); - IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT)); Document doc = new Document(); doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED)); for(int i=0;i