merged with trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/realtime_search@1091748 13f79535-47bb-0310-9956-ffa450edef68
Simon Willnauer 2011-04-13 11:22:24 +00:00
commit 8428ecc7ee
241 changed files with 1834 additions and 1097 deletions

View File

@@ -8,10 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -26,5 +22,9 @@
</orderEntry>
<orderEntry type="library" name="Ant" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,20 +8,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
<library>
<CLASSES>
<root url="file://$MODULE_DIR$/lib" />
</CLASSES>
<JAVADOC />
<SOURCES />
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,11 +8,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,10 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -24,5 +20,9 @@
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,11 +8,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,10 +8,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -22,5 +20,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,11 +8,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -10,13 +10,13 @@
<sourceFolder url="file://$MODULE_DIR$/src/demo/WebContent" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="library" name="Servlet API 2.4" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -9,9 +9,9 @@
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -10,8 +10,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -25,5 +23,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -9,10 +9,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -9,10 +9,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -10,13 +10,6 @@
<excludeFolder url="file://$MODULE_DIR$/temp" />
<excludeFolder url="file://$MODULE_DIR$/work" />
</content>
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -30,5 +23,12 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -10,12 +10,6 @@
<excludeFolder url="file://$MODULE_DIR$/build" />
<excludeFolder url="file://$MODULE_DIR$/lucene-libs" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="smartcn" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="stempel" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -30,5 +24,11 @@
</orderEntry>
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="smartcn" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="stempel" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -10,16 +10,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@@ -35,5 +25,15 @@
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="common" />
</component>
</module>

View File

@@ -11,12 +11,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/test/resources" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/target" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@@ -9,14 +9,14 @@
<sourceFolder url="file://$MODULE_DIR$/test/java" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/test/resources" isTestSource="true" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="dataimporthandler" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="library" name="Solr extraction library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="dataimporthandler" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@@ -10,11 +10,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr extraction library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@@ -11,8 +11,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/main/resources" isTestSource="false" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
@@ -26,5 +24,7 @@
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@@ -19,18 +19,18 @@
<excludeFolder url="file://$MODULE_DIR$/lucene-libs" />
<excludeFolder url="file://$MODULE_DIR$/package" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr example library" level="project" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr example library" level="project" />
</component>
</module>

View File

@@ -404,6 +404,12 @@ Optimizations
* LUCENE-2990: ArrayUtil/CollectionUtil.*Sort() methods now exit early
on empty or one-element lists/arrays. (Uwe Schindler)
Bug fixes
* LUCENE-3024: Index with more than 2.1B terms was hitting AIOOBE when
seeking TermEnum (eg used by Solr's faceting) (Tom Burton-West, Mike
McCandless)
======================= Lucene 3.1.0 =======================
Changes in backwards compatibility policy
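The LUCENE-3024 entry above records an ArrayIndexOutOfBoundsException once an index passes roughly 2.1B terms. The patch itself is not part of this diff, but the failure mode it describes is the usual 32-bit wraparound: a term ordinal or offset computed in int arithmetic goes negative past Integer.MAX_VALUE and is then used as an array index. A minimal, hypothetical sketch of that class of overflow (names invented, not Lucene's actual code):

// Hypothetical sketch: a count that only fits in a long is truncated to int
// and wraps negative, so using it as an array index throws AIOOBE.
public class TermOrdOverflow {
    public static void main(String[] args) {
        long termCount = 2200000000L;       // more than Integer.MAX_VALUE (~2.147B)
        int wrapped = (int) termCount;      // truncates: prints a negative number
        System.out.println("int-truncated ordinal: " + wrapped);
        long blockOffset = termCount / 128; // safe: keep the arithmetic in long
        System.out.println("offset computed in long: " + blockOffset);
    }
}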

View File

@@ -22,16 +22,17 @@ import java.io.File;
import java.io.PrintStream;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
public class TestDemo extends LuceneTestCase {
private void testOneSearch(String query, int expectedHitCount) throws Exception {
private void testOneSearch(File indexPath, String query, int expectedHitCount) throws Exception {
PrintStream outSave = System.out;
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
PrintStream fakeSystemOut = new PrintStream(bytes);
System.setOut(fakeSystemOut);
SearchFiles.main(new String[] {"-query", query});
SearchFiles.main(new String[] {"-query", query, "-index", indexPath.getPath()});
fakeSystemOut.flush();
String output = bytes.toString(); // intentionally use default encoding
assertTrue("output=" + output, output.contains(expectedHitCount + " total matching documents"));
@@ -42,12 +43,13 @@ public class TestDemo extends LuceneTestCase {
public void testIndexSearch() throws Exception {
File dir = getDataFile("test-files/docs");
IndexFiles.main(new String[] { "-create", "-docs", dir.getPath() });
testOneSearch("apache", 3);
testOneSearch("patent", 8);
testOneSearch("lucene", 0);
testOneSearch("gnu", 6);
testOneSearch("derivative", 8);
testOneSearch("license", 13);
File indexDir = _TestUtil.getTempDir("ContribDemoTest");
IndexFiles.main(new String[] { "-create", "-docs", dir.getPath(), "-index", indexDir.getPath()});
testOneSearch(indexDir, "apache", 3);
testOneSearch(indexDir, "patent", 8);
testOneSearch(indexDir, "lucene", 0);
testOneSearch(indexDir, "gnu", 6);
testOneSearch(indexDir, "derivative", 8);
testOneSearch(indexDir, "license", 13);
}
}
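The hunks above thread an explicit index directory through TestDemo, and they keep the test's existing trick of swapping System.out for an in-memory stream so the demo's console output can be asserted on. That capture-and-restore pattern in isolation, as a self-contained sketch (the printed string is a stand-in for SearchFiles output):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class CaptureStdout {
    public static void main(String[] args) {
        PrintStream saved = System.out;                       // remember the real stream
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        System.setOut(new PrintStream(bytes));
        try {
            System.out.println("3 total matching documents"); // stand-in for SearchFiles output
        } finally {
            System.setOut(saved);                             // always restore, as the test does
        }
        String output = bytes.toString();                     // default encoding, matching the test
        saved.println("captured: " + output.trim());
    }
}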

View File

@@ -58,7 +58,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox jumped";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -102,7 +102,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox jumped";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -172,7 +172,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@@ -215,7 +215,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, TEXT, Store.YES, Index.ANALYZED,
@@ -256,7 +256,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
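Every hunk in this file makes the same mechanical change: MockAnalyzer now takes the test's random as its first argument, tying the analyzer's randomized behavior to the suite's seed so a failing run can be replayed. A minimal sketch of that injection pattern, with a hypothetical component standing in for MockAnalyzer:

import java.util.Random;

// Hypothetical stand-in for MockAnalyzer: behavior derives from an injected
// Random rather than an unseeded one, so a run is replayable from its seed.
public class SeededComponent {
    private final Random random;

    public SeededComponent(Random random) {
        this.random = random;
    }

    public int pickVariant() {
        return random.nextInt(4); // same seed => same sequence of choices
    }

    public static void main(String[] args) {
        long suiteSeed = 42L; // in LuceneTestCase this comes from the test runner
        System.out.println(new SeededComponent(new Random(suiteSeed)).pickVariant());
    }
}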

View File

@@ -90,7 +90,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Directory ramDir;
public IndexSearcher searcher = null;
int numHighlights = 0;
final Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
final Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
TopDocs hits;
String[] texts = {
@@ -101,7 +101,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
"wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
public void testQueryScorerHits() throws Exception {
Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
query = qp.parse("\"very long\"");
searcher = new IndexSearcher(ramDir, true);
@@ -133,7 +133,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String s1 = "I call our world Flatland, not because we call it so,";
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
// Verify that a query against the default field results in text being
// highlighted
@@ -165,7 +165,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
*/
private static String highlightField(Query query, String fieldName, String text)
throws IOException, InvalidTokenOffsetsException {
TokenStream tokenStream = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
// Assuming "<B>", "</B>" used to highlight
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -210,7 +210,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String f2c = f2 + ":";
String q = "(" + f1c + ph1 + " OR " + f2c + ph1 + ") AND (" + f1c + ph2
+ " OR " + f2c + ph2 + ")";
Analyzer analyzer = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, f1, analyzer);
Query query = qp.parse(q);
@@ -1134,13 +1134,13 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
sb.append("stoppedtoken");
}
SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
Highlighter hg = getHighlighter(query, "data", new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true).tokenStream(
Highlighter hg = getHighlighter(query, "data", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream(
"data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
// new
// QueryTermScorer(query));
hg.setTextFragmenter(new NullFragmenter());
hg.setMaxDocCharsToAnalyze(100);
match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
.getMaxDocCharsToAnalyze());
@@ -1151,7 +1151,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// + whitespace)
sb.append(" ");
sb.append(goodWord);
match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
.getMaxDocCharsToAnalyze());
}
@@ -1170,10 +1170,10 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String text = "this is a text with searchterm in it";
SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
hg.setTextFragmenter(new NullFragmenter());
hg.setMaxDocCharsToAnalyze(36);
String match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
assertTrue(
"Matched text should contain remainder of text after highlighted query ",
match.endsWith("in it"));
@@ -1191,7 +1191,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// test to show how rewritten query can still be used
if (searcher != null) searcher.close();
searcher = new IndexSearcher(ramDir, true);
Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
Query query = parser.parse("JF? or Kenned*");
@@ -1446,64 +1446,64 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Highlighter highlighter;
String result;
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("foo");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 <B>foo</B>", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("10");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("10");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed<B>10</B> foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi</B>-Speed10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-<B>Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hispeed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
// ///////////////// same tests, just put the bigger overlapping token
// first
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("foo");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 <B>foo</B>", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("10");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("10");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed<B>10</B> foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi</B>-Speed10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-<B>Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hispeed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
@@ -1514,7 +1514,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private Directory dir;
private Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
private Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
public void testWeightedTermsWithDeletes() throws IOException, ParseException, InvalidTokenOffsetsException {
makeIndex();
@@ -1529,7 +1529,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private void makeIndex() throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
@@ -1539,7 +1539,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private void deleteDocument() throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
writer.deleteDocuments( new Term( "t_text1", "del" ) );
// To see negative idf, keep comment the following line
//writer.optimize();
@@ -1644,7 +1644,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
dir = newDirectory();
ramDir = newDirectory();
IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
for (String text : texts) {
addDoc(writer, text);
}

View File

@@ -87,9 +87,9 @@ public abstract class AbstractTestCase extends LuceneTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
analyzerW = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
analyzerW = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
analyzerB = new BigramAnalyzer();
analyzerK = new MockAnalyzer(MockTokenizer.KEYWORD, false);
analyzerK = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
paW = new QueryParser(TEST_VERSION_CURRENT, F, analyzerW );
paB = new QueryParser(TEST_VERSION_CURRENT, F, analyzerB );
dir = newDirectory();

View File

@@ -59,7 +59,7 @@ public class TestEmptyIndex extends LuceneTestCase {
// make sure a Directory acts the same
Directory d = newDirectory();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
r = IndexReader.open(d, false);
testNorms(r);
r.close();
@@ -84,7 +84,7 @@ public class TestEmptyIndex extends LuceneTestCase {
// make sure a Directory acts the same
Directory d = newDirectory();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
r = IndexReader.open(d, false);
termsEnumTest(r);
r.close();

View File

@@ -21,6 +21,7 @@ import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
@@ -65,7 +66,7 @@ public class TestIndicesEquals extends LuceneTestCase {
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < 20; i++) {
Document document = new Document();
@@ -88,10 +89,13 @@
Directory dir = newDirectory();
InstantiatedIndex ii = new InstantiatedIndex();
// we need to pass the "same" random to both, so they surely index the same payload data.
long seed = random.nextLong();
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed))).setMergePolicy(newLogMergePolicy()));
indexWriter.setInfoStream(VERBOSE ? System.out : null);
if (VERBOSE) {
System.out.println("TEST: make test index");
@@ -104,7 +108,7 @@
indexWriter.close();
// test ii writer
InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new MockAnalyzer(), true);
InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new MockAnalyzer(new Random(seed)), true);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
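The comment added in this hunk explains the seed handling: the Directory-backed IndexWriter and the InstantiatedIndexWriter must index identical random payload data, so one seed is captured and each writer gets its own new Random(seed). The same idea in isolation:

import java.util.Random;

public class SharedSeed {
    public static void main(String[] args) {
        long seed = new Random().nextLong(); // capture the seed once
        Random r1 = new Random(seed);        // one generator per consumer...
        Random r2 = new Random(seed);        // ...both replaying the same sequence
        for (int i = 0; i < 3; i++) {
            int a = r1.nextInt(100);
            int b = r2.nextInt(100);
            System.out.println(a + " == " + b); // always equal
        }
    }
}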

View File

@@ -34,17 +34,17 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocument(iw, "Hello, world!");
addDocument(iw, "All work and no play makes jack a dull boy");
iw.close();
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
addDocument(iw, "Hello, tellus!");
addDocument(iw, "All work and no play makes danny a dull boy");
iw.close();
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
addDocument(iw, "Hello, earth!");
addDocument(iw, "All work and no play makes wendy a dull girl");
iw.close();

View File

@@ -143,9 +143,9 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
*/
private Analyzer randomAnalyzer() {
switch(random.nextInt(3)) {
case 0: return new MockAnalyzer(MockTokenizer.SIMPLE, true);
case 1: return new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
default: return new MockAnalyzer(MockTokenizer.WHITESPACE, false);
case 0: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
case 1: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
default: return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
}
}
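randomAnalyzer() above picks one of three analyzer configurations per run via random.nextInt(3); because that random is the suite's seeded instance, the pick is reproducible from the test seed. A stripped-down sketch of the pattern, with hypothetical configuration names:

import java.util.Random;

public class RandomConfig {
    enum Tokenization { SIMPLE, SIMPLE_WITH_STOPWORDS, WHITESPACE }

    static Tokenization randomTokenization(Random random) {
        switch (random.nextInt(3)) {
            case 0:  return Tokenization.SIMPLE;
            case 1:  return Tokenization.SIMPLE_WITH_STOPWORDS;
            default: return Tokenization.WHITESPACE;
        }
    }

    public static void main(String[] args) {
        Random random = new Random(7L); // fixed seed reproduces the same pick
        System.out.println(randomTokenization(random));
    }
}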

View File

@@ -61,7 +61,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();

View File

@@ -39,7 +39,7 @@ public class TestIndexSplitter extends LuceneTestCase {
mergePolicy.setNoCFSRatio(1);
IndexWriter iw = new IndexWriter(
fsDir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMergePolicy(mergePolicy)
);

View File

@@ -32,7 +32,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
doc = new Document();

View File

@@ -25,7 +25,7 @@ public class TestTermVectorAccessor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Document doc;

View File

@@ -134,7 +134,7 @@ public class TestAppendingCodec extends LuceneTestCase {
public void testCodec() throws Exception {
Directory dir = new AppendingRAMDirectory(random, new RAMDirectory());
IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer());
IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random));
cfg.setCodecProvider(new AppendingCodecProvider());
((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false);

View File

@@ -40,7 +40,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
public static void setUpClass() throws Exception {
dir = newDirectory();
writer = new IndexWriter(dir, newIndexWriterConfig(random,
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false))
TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
.setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
indexDocs(writer);

View File

@@ -66,7 +66,7 @@ public class TestLengthNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();

View File

@@ -39,7 +39,7 @@ public class BooleanFilterTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
//Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags
addDoc(writer, "admin guest", "010", "20040101","Y");

View File

@@ -43,7 +43,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
//Add series of docs with filterable fields : url, text and dates flags
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");

View File

@@ -34,13 +34,13 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private Directory directory;
private IndexSearcher searcher;
private IndexReader reader;
private Analyzer analyzer=new MockAnalyzer();
private Analyzer analyzer=new MockAnalyzer(random);
@Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy()));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
//Add series of docs with misspelt names
addDoc(writer, "jonathon smythe","1");
@@ -121,7 +121,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
}
public void testFuzzyLikeThisQueryEquals() {
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer);
fltq1.addTerms("javi", "subject", 0.5f, 2);
FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer);

View File

@@ -56,7 +56,7 @@ public class TestSpanRegexQuery extends LuceneTestCase {
public void testSpanRegex() throws Exception {
Directory directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Document doc = new Document();
// doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
// Field.Store.NO, Field.Index.ANALYZED));
@@ -97,14 +97,14 @@
// creating first index writer
IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerA.addDocument(lDoc);
writerA.optimize();
writerA.close();
// creating second index writer
IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerB.addDocument(lDoc2);
writerB.optimize();
writerB.close();

View File

@@ -74,7 +74,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
Map<String,Float> originalValues = getOriginalValues();
MoreLikeThis mlt = new MoreLikeThis(reader);
mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
mlt.setMinDocFreq(1);
mlt.setMinTermFreq(1);
mlt.setMinWordLen(1);
@@ -109,7 +109,7 @@
private Map<String,Float> getOriginalValues() throws IOException {
Map<String,Float> originalValues = new HashMap<String,Float>();
MoreLikeThis mlt = new MoreLikeThis(reader);
mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
mlt.setMinDocFreq(1);
mlt.setMinTermFreq(1);
mlt.setMinWordLen(1);

View File

@@ -34,7 +34,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestComplexPhraseQuery extends LuceneTestCase {
Directory rd;
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
DocData docsContent[] = { new DocData("john smith", "1"),
new DocData("johathon smith", "2"),

View File

@@ -43,7 +43,7 @@ public class TestExtendableQueryParser extends TestQueryParser {
public QueryParser getParser(Analyzer a, Extensions extensions)
throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
QueryParser qp = extensions == null ? new ExtendableQueryParser(
TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
TEST_VERSION_CURRENT, "field", a, extensions);

View File

@@ -125,7 +125,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.OR);
@@ -171,7 +171,7 @@
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@@ -232,7 +232,7 @@
"+(title:dog title:cat) -author:\"bob dole\"");
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(new MockAnalyzer());
qp.setAnalyzer(new MockAnalyzer(random));
// make sure OR is the default:
assertEquals(Operator.OR, qp.getDefaultOperator());
qp.setDefaultOperator(Operator.AND);
@@ -246,7 +246,7 @@
}
public void testPunct() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -266,7 +266,7 @@
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
Analyzer a = new MockAnalyzer();
Analyzer a = new MockAnalyzer(random);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -405,7 +405,7 @@
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer());
PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random));
Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<CharSequence,DateTools.Resolution>();
// set a field specific date resolution
@ -467,7 +467,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
}
public void testEscaped() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a\\-b:c", a, "a-b:c");
assertQueryEquals("a\\+b:c", a, "a+b:c");
@ -533,7 +533,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public void testBoost() throws Exception {
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on"));
Analyzer oneStopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true);
Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@ -548,7 +548,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
q = qp.parse("\"on\"^1.0", "field");
assertNotNull(q);
q = getParser(new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3",
q = getParser(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3",
"field");
assertNotNull(q);
}
@ -564,7 +564,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
getParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("one two three", "field");
getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
} catch (QueryNodeException expected) {
// too many boolean clauses, so ParseException is expected
@ -573,7 +573,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
// LUCENE-792
public void testNOT() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("NOT foo AND bar", a, "-foo +bar");
}
@ -582,7 +582,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
* issue has been corrected.
*/
public void testPrecedence() throws Exception {
PrecedenceQueryParser parser = getParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
PrecedenceQueryParser parser = getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query query1 = parser.parse("A AND B OR C AND D", "field");
Query query2 = parser.parse("(A AND B) OR (C AND D)", "field");
assertEquals(query1, query2);

View File

@ -80,7 +80,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setAnalyzer(new MockAnalyzer());
mfqp.setAnalyzer(new MockAnalyzer(random));
Query q = mfqp.parse("one", null);
assertEquals("b:one t:one", q.toString());
@ -150,7 +150,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
mfqp.setAnalyzer(new MockAnalyzer());
mfqp.setAnalyzer(new MockAnalyzer(random));
// Check for simple
Query q = mfqp.parse("one", null);
@ -178,24 +178,24 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
public void testStaticMethod1() throws QueryNodeException {
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer());
Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -219,15 +219,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse("one", fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+b:one -t:one", q.toString());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer());
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -240,19 +240,19 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur.MUST_NOT };
StandardQueryParser parser = new StandardQueryParser();
parser.setMultiFields(fields);
parser.setAnalyzer(new MockAnalyzer());
parser.setAnalyzer(new MockAnalyzer(random));
Query q = QueryParserUtil.parse("one", fields, flags,
new MockAnalyzer());// , fields, flags, new
new MockAnalyzer(random));// , fields, flags, new
// MockAnalyzer());
assertEquals("+b:one -t:one", q.toString());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer());
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -265,13 +265,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = QueryParserUtil.parse(queries, fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
.parse(queries, fields, flags2, new MockAnalyzer());
.parse(queries, fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -284,13 +284,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse(queries, fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
.parse(queries, fields, flags2, new MockAnalyzer());
.parse(queries, fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -316,7 +316,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
}
public void testStopWordSearching() throws Exception {
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
@ -342,7 +342,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
* Return empty tokens for field "f1".
*/
private static final class AnalyzerReturningNull extends Analyzer {
MockAnalyzer stdAnalyzer = new MockAnalyzer();
MockAnalyzer stdAnalyzer = new MockAnalyzer(random);
public AnalyzerReturningNull() {
}

View File

@ -191,7 +191,7 @@ public class TestQPHelper extends LuceneTestCase {
public StandardQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
@ -281,7 +281,7 @@ public class TestQPHelper extends LuceneTestCase {
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@ -301,7 +301,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testConstantScoreAutoRewrite() throws Exception {
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query q = qp.parse("foo*bar", "field");
assertTrue(q instanceof WildcardQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod());
@ -410,9 +410,9 @@ public class TestQPHelper extends LuceneTestCase {
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
assertQueryEquals("t<EFBFBD>rm term term", new MockAnalyzer(MockTokenizer.WHITESPACE, false),
assertQueryEquals("t<EFBFBD>rm term term", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false),
"t<EFBFBD>rm term term");
assertQueryEquals("<EFBFBD>mlaut", new MockAnalyzer(MockTokenizer.WHITESPACE, false), "<EFBFBD>mlaut");
assertQueryEquals("<EFBFBD>mlaut", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), "<EFBFBD>mlaut");
// FIXME: change MockAnalyzer to not extend CharTokenizer for this test
//assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
@ -470,7 +470,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testPunct() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@ -491,7 +491,7 @@ public class TestQPHelper extends LuceneTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@ -726,7 +726,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testEscaped() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@ -825,7 +825,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testQueryStringEscaping() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@ -866,7 +866,7 @@ public class TestQPHelper extends LuceneTestCase {
@Ignore("contrib queryparser shouldn't escape wildcard terms")
public void testEscapedWildcard() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r"));
assertEquals(q, qp.parse("foo\\?ba?r", "field"));
@ -904,7 +904,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testBoost() throws Exception {
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on"));
Analyzer oneStopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true);
Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@ -920,7 +920,7 @@ public class TestQPHelper extends LuceneTestCase {
assertNotNull(q);
StandardQueryParser qp2 = new StandardQueryParser();
qp2.setAnalyzer(new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
qp2.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
q = qp2.parse("the^3", "field");
// "the" is a stop word so the result is an empty query:
@ -950,7 +950,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testCustomQueryParserWildcard() {
try {
new QPTestParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("a?t", "contents");
new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t", "contents");
fail("Wildcard queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@ -959,7 +959,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testCustomQueryParserFuzzy() throws Exception {
try {
new QPTestParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents");
new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents");
fail("Fuzzy queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@ -970,7 +970,7 @@ public class TestQPHelper extends LuceneTestCase {
BooleanQuery.setMaxClauseCount(2);
try {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
qp.parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
@ -984,7 +984,7 @@ public class TestQPHelper extends LuceneTestCase {
*/
public void testPrecedence() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query query1 = qp.parse("A AND B OR C AND D", "field");
Query query2 = qp.parse("+A +B +C +D", "field");
@ -995,7 +995,7 @@ public class TestQPHelper extends LuceneTestCase {
// Todo: Convert from DateField to DateUtil
// public void testLocalDateFormat() throws IOException, QueryNodeException {
// Directory ramDir = newDirectory();
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
// iw.close();
@ -1116,7 +1116,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testStopwords() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton());
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true));
Query result = qp.parse("a:the OR a:foo", "a");
assertNotNull("result is null and it shouldn't be", result);
@ -1140,7 +1140,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testPositionIncrement() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
qp.setEnablePositionIncrements(true);
@ -1161,7 +1161,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testMatchAllDocs() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field"));
@ -1173,7 +1173,7 @@ public class TestQPHelper extends LuceneTestCase {
private void assertHits(int expected, String query, IndexSearcher is)
throws IOException, QueryNodeException {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query, "date");

View File

@ -41,7 +41,7 @@ public class SingleFieldTestDb {
fieldName = fName;
IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(
Version.LUCENE_CURRENT,
new MockAnalyzer()));
new MockAnalyzer(random)));
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED));

View File

@ -71,7 +71,7 @@ public class TestCartesian extends LuceneTestCase {
super.setUp();
directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
setUpPlotter( 2, 15);

View File

@ -47,7 +47,7 @@ public class TestDistance extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addData(writer);
}

View File

@ -35,7 +35,7 @@ public class TestDirectSpellChecker extends LuceneTestCase {
spellChecker.setMinQueryLength(0);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(MockTokenizer.SIMPLE, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
for (int i = 0; i < 20; i++) {
Document doc = new Document();
@ -93,7 +93,7 @@ public class TestDirectSpellChecker extends LuceneTestCase {
public void testOptions() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(MockTokenizer.SIMPLE, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(newField("text", "foobar", Field.Store.NO, Field.Index.ANALYZED));

View File

@ -46,7 +46,7 @@ public class TestLuceneDictionary extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
Document doc;

View File

@ -54,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase {
//create a user index
userindex = newDirectory();
IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < 1000; i++) {
Document doc = new Document();

View File

@ -63,7 +63,7 @@ public class TestWordnet extends LuceneTestCase {
private void assertExpandsTo(String term, String expected[]) throws IOException {
Query expandedQuery = SynExpand.expand(term, searcher, new
MockAnalyzer(), "field", 1F);
MockAnalyzer(random), "field", 1F);
BooleanQuery expectedQuery = new BooleanQuery();
for (String t : expected)
expectedQuery.add(new TermQuery(new Term("field", t)),

View File

@ -49,7 +49,7 @@ public class TestParser extends LuceneTestCase {
@BeforeClass
public static void beforeClass() throws Exception {
// TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT):
Analyzer analyzer=new MockAnalyzer(MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false);
Analyzer analyzer=new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false);
//initialize the parser
builder=new CorePlusExtensionsParser("contents",analyzer);

View File

@ -44,7 +44,7 @@ import org.xml.sax.SAXException;
public class TestQueryTemplateManager extends LuceneTestCase {
CoreParser builder;
Analyzer analyzer=new MockAnalyzer();
Analyzer analyzer=new MockAnalyzer(random);
private IndexSearcher searcher;
private Directory dir;

View File

@ -661,10 +661,13 @@ public class CheckIndex {
status.termCount++;
final DocsEnum docs2;
final boolean hasPositions;
if (postings != null) {
docs2 = postings;
hasPositions = true;
} else {
docs2 = docs;
hasPositions = false;
}
int lastDoc = -1;
@ -736,22 +739,60 @@ public class CheckIndex {
// Test skipping
if (docFreq >= 16) {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
docs = terms.docs(delDocs, docs);
final int docID = docs.advance(skipDocID);
if (docID == DocsEnum.NO_MORE_DOCS) {
break;
} else {
if (docID < skipDocID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
}
final int nextDocID = docs.nextDoc();
if (nextDocID == DocsEnum.NO_MORE_DOCS) {
if (hasPositions) {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
postings = terms.docsAndPositions(delDocs, postings);
final int docID = postings.advance(skipDocID);
if (docID == DocsEnum.NO_MORE_DOCS) {
break;
} else {
if (docID < skipDocID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
}
final int freq = postings.freq();
if (freq <= 0) {
throw new RuntimeException("termFreq " + freq + " is out of bounds");
}
int lastPosition = -1;
for(int posUpto=0;posUpto<freq;posUpto++) {
final int pos = postings.nextPosition();
if (pos < 0) {
throw new RuntimeException("position " + pos + " is out of bounds");
}
if (pos <= lastPosition) {
throw new RuntimeException("position " + pos + " is <= lastPosition " + lastPosition);
}
lastPosition = pos;
}
final int nextDocID = postings.nextDoc();
if (nextDocID == DocsEnum.NO_MORE_DOCS) {
break;
}
if (nextDocID <= docID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + "), then .next() returned docID=" + nextDocID + " vs prev docID=" + docID);
}
}
if (nextDocID <= docID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + "), then .next() returned docID=" + nextDocID + " vs prev docID=" + docID);
}
} else {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
docs = terms.docs(delDocs, docs);
final int docID = docs.advance(skipDocID);
if (docID == DocsEnum.NO_MORE_DOCS) {
break;
} else {
if (docID < skipDocID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
}
final int nextDocID = docs.nextDoc();
if (nextDocID == DocsEnum.NO_MORE_DOCS) {
break;
}
if (nextDocID <= docID) {
throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + "), then .next() returned docID=" + nextDocID + " vs prev docID=" + docID);
}
}
}
}
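Both branches above probe the same DocsEnum contract at seven evenly spaced targets; a standalone sketch of the invariant being checked (hypothetical helper, using only the DocsEnum calls visible in this hunk):

  // Probe one skip target and verify the advance()/nextDoc() contract,
  // throwing RuntimeException on violation in CheckIndex style.
  private static void checkSkip(DocsEnum de, int skipDocID) throws IOException {
    final int docID = de.advance(skipDocID);
    if (docID == DocsEnum.NO_MORE_DOCS) {
      return; // enum exhausted: nothing left to verify
    }
    if (docID < skipDocID) {
      throw new RuntimeException("advance(" + skipDocID + ") went backwards to " + docID);
    }
    final int nextDocID = de.nextDoc();
    if (nextDocID != DocsEnum.NO_MORE_DOCS && nextDocID <= docID) {
      throw new RuntimeException("nextDoc() returned " + nextDocID + " <= prev docID " + docID);
    }
  }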

View File

@ -18,6 +18,9 @@ package org.apache.lucene.index.codecs;
*/
import java.io.IOException;
import java.io.FileOutputStream; // for toDot
import java.io.OutputStreamWriter; // for toDot
import java.io.Writer; // for toDot
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
@ -34,6 +37,7 @@ import org.apache.lucene.util.automaton.fst.Builder;
import org.apache.lucene.util.automaton.fst.BytesRefFSTEnum;
import org.apache.lucene.util.automaton.fst.FST;
import org.apache.lucene.util.automaton.fst.PositiveIntOutputs;
import org.apache.lucene.util.automaton.fst.Util; // for toDot
/** See {@link VariableGapTermsIndexWriter}
*
@ -52,11 +56,13 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase {
// start of the field info data
protected long dirOffset;
final String segment;
public VariableGapTermsIndexReader(Directory dir, FieldInfos fieldInfos, String segment, int indexDivisor, String codecId)
throws IOException {
in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, VariableGapTermsIndexWriter.TERMS_INDEX_EXTENSION));
this.segment = segment;
boolean success = false;
try {
@ -176,6 +182,14 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase {
fst = new FST<Long>(clone, fstOutputs);
clone.close();
/*
final String dotFileName = segment + "_" + fieldInfo.name + ".dot";
Writer w = new OutputStreamWriter(new FileOutputStream(dotFileName));
Util.toDot(fst, w, false, false);
System.out.println("FST INDEX: SAVED to " + dotFileName);
w.close();
*/
if (indexDivisor > 1) {
// subsample
final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);

View File

@ -310,7 +310,7 @@ public final class TermInfosReader {
}
} else {
assert sameTermInfo(ti, tiOrd, enumerator);
assert (int) enumerator.position == tiOrd.termOrd;
assert enumerator.position == tiOrd.termOrd;
}
} else {
ti = null;

View File

@ -233,6 +233,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
private Bits skipDocs;
private int docID;
private int freq;
private int payloadLength;
public PulsingDocsEnum(FieldInfo fieldInfo) {
omitTF = fieldInfo.omitTermFreqAndPositions;
@ -246,6 +247,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
System.arraycopy(termState.postings, 0, bytes, 0, termState.postingsSize);
postings.reset(bytes);
docID = 0;
payloadLength = 0;
freq = 1;
this.skipDocs = skipDocs;
return this;
@ -277,7 +279,6 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
// Skip positions
if (storePayloads) {
int payloadLength = -1;
for(int pos=0;pos<freq;pos++) {
final int posCode = postings.readVInt();
if ((posCode & 1) != 0) {
@ -352,6 +353,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
postings.reset(bytes);
this.skipDocs = skipDocs;
payloadLength = 0;
posPending = 0;
docID = 0;
//System.out.println("PR d&p reset storesPayloads=" + storePayloads + " bytes=" + bytes.length + " this=" + this);
return this;
@ -359,7 +361,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
@Override
public int nextDoc() throws IOException {
//System.out.println("PR d&p nextDoc this=" + this);
//System.out.println("PR.nextDoc this=" + this);
while(true) {
//System.out.println(" cycle skip posPending=" + posPending);
@ -367,15 +369,16 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
skipPositions();
if (postings.eof()) {
//System.out.println("PR END");
//System.out.println(" END");
return docID = NO_MORE_DOCS;
}
//System.out.println(" read doc code");
final int code = postings.readVInt();
docID += code >>> 1; // shift off low bit
if ((code & 1) != 0) { // if low bit is set
freq = 1; // freq is one
} else {
//System.out.println(" read freq");
freq = postings.readVInt(); // else read freq
}
posPending = freq;
@ -400,10 +403,12 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
@Override
public int advance(int target) throws IOException {
//System.out.println("PR.advance target=" + target);
int doc;
while((doc=nextDoc()) != NO_MORE_DOCS) {
//System.out.println(" nextDoc got doc=" + doc);
if (doc >= target) {
return doc;
return docID = doc;
}
}
return docID = NO_MORE_DOCS;
@ -411,7 +416,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
@Override
public int nextPosition() throws IOException {
//System.out.println("PR d&p nextPosition posPending=" + posPending + " vs freq=" + freq);
//System.out.println("PR.nextPosition posPending=" + posPending + " vs freq=" + freq);
assert posPending > 0;
posPending--;
@ -421,6 +426,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
//System.out.println("PR skip payload=" + payloadLength);
postings.skipBytes(payloadLength);
}
//System.out.println(" read pos code");
final int code = postings.readVInt();
//System.out.println("PR code=" + code);
if ((code & 1) != 0) {
@ -433,16 +439,17 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
position += postings.readVInt();
}
//System.out.println("PR d&p nextPos return pos=" + position + " this=" + this);
//System.out.println(" return pos=" + position + " hasPayload=" + !payloadRetrieved + " posPending=" + posPending + " this=" + this);
return position;
}
private void skipPositions() throws IOException {
//System.out.println("PR.skipPositions: posPending=" + posPending);
while(posPending != 0) {
nextPosition();
}
if (storePayloads && !payloadRetrieved) {
//System.out.println(" skip payload len=" + payloadLength);
//System.out.println(" skip last payload len=" + payloadLength);
postings.skipBytes(payloadLength);
payloadRetrieved = true;
}

View File

@ -201,6 +201,7 @@ public final class PulsingPostingsWriterImpl extends PostingsWriterBase {
if (!omitTF) {
int lastDocID = 0;
int pendingIDX = 0;
int lastPayloadLength = -1;
while(pendingIDX < pendingCount) {
final Position doc = pending[pendingIDX];
@ -217,7 +218,6 @@ public final class PulsingPostingsWriterImpl extends PostingsWriterBase {
}
int lastPos = 0;
int lastPayloadLength = -1;
for(int posIDX=0;posIDX<doc.termFreq;posIDX++) {
final Position pos = pending[pendingIDX++];
assert pos.docID == doc.docID;
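This writer fix and the reader fix above share one cause: a payload length is only written when it differs from the previous one, so the running "last length" must survive across documents instead of being reset per document. A write-side sketch under that reading (hypothetical names; the low bit of the per-position code flags a new length, as in the reader hunk, and the position delta is assumed to sit in the upper bits):

  int posCode = posDelta << 1;             // position delta in upper bits
  if (payloadLength != lastPayloadLength) {
    posCode |= 1;                          // low bit: a new payload length follows
  }
  out.writeVInt(posCode);
  if ((posCode & 1) != 0) {
    out.writeVInt(payloadLength);
    lastPayloadLength = payloadLength;     // carried across docs, not reset per doc
  }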

View File

@ -69,12 +69,13 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase {
* smaller indexes, greater acceleration, but fewer accelerable cases, while
* smaller values result in bigger indexes, less acceleration and more
* accelerable cases. More detailed experiments would be useful here. */
final int skipInterval = 16;
final int skipInterval;
static final int DEFAULT_SKIP_INTERVAL = 16;
/**
* Expert: minimum docFreq to write any skip data at all
*/
final int skipMinimum = skipInterval;
final int skipMinimum;
/** Expert: The maximum number of skip levels. Smaller values result in
* slightly smaller indexes, but slower skipping in big posting lists.
@ -102,8 +103,13 @@ public final class SepPostingsWriterImpl extends PostingsWriterBase {
private final RAMOutputStream indexBytesWriter = new RAMOutputStream();
public SepPostingsWriterImpl(SegmentWriteState state, IntStreamFactory factory) throws IOException {
super();
this(state, factory, DEFAULT_SKIP_INTERVAL);
}
public SepPostingsWriterImpl(SegmentWriteState state, IntStreamFactory factory, int skipInterval) throws IOException {
super();
this.skipInterval = skipInterval;
this.skipMinimum = skipInterval; /* set to the same for now */
final String docFileName = IndexFileNames.segmentFileName(state.segmentName, state.codecId, DOC_EXTENSION);
docOut = factory.createOutput(state.directory, docFileName);
docIndex = docOut.index();

View File

@ -50,12 +50,13 @@ public final class StandardPostingsWriter extends PostingsWriterBase {
* smaller indexes, greater acceleration, but fewer accelerable cases, while
* smaller values result in bigger indexes, less acceleration and more
* accelerable cases. More detailed experiments would be useful here. */
final int skipInterval = 16;
static final int DEFAULT_SKIP_INTERVAL = 16;
final int skipInterval;
/**
* Expert: minimum docFreq to write any skip data at all
*/
final int skipMinimum = skipInterval;
final int skipMinimum;
/** Expert: The maximum number of skip levels. Smaller values result in
* slightly smaller indexes, but slower skipping in big posting lists.
@ -82,7 +83,12 @@ public final class StandardPostingsWriter extends PostingsWriterBase {
private RAMOutputStream bytesWriter = new RAMOutputStream();
public StandardPostingsWriter(SegmentWriteState state) throws IOException {
this(state, DEFAULT_SKIP_INTERVAL);
}
public StandardPostingsWriter(SegmentWriteState state, int skipInterval) throws IOException {
super();
this.skipInterval = skipInterval;
this.skipMinimum = skipInterval; /* set to the same for now */
//this.segment = state.segmentName;
String fileName = IndexFileNames.segmentFileName(state.segmentName, state.codecId, StandardCodec.FREQ_EXTENSION);
freqOut = state.directory.createOutput(fileName);
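Both writers follow the same pattern: the old one-argument constructor becomes sugar delegating to a new overload that takes the skip interval, so existing callers are unchanged while tests can randomize it (as MockRandomCodec does below). A usage sketch, assuming a SegmentWriteState in hand:

  // default skip interval (16), exactly as before this commit:
  PostingsWriterBase standard = new StandardPostingsWriter(state);
  // explicit skip interval, e.g. a random 2..64 value as in MockRandomCodec:
  PostingsWriterBase custom = new StandardPostingsWriter(state, 7);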

View File

@ -857,4 +857,9 @@ public class IndexSearcher {
this.leaves = leaves;
}
}
@Override
public String toString() {
return "IndexSearcher(" + reader + ")";
}
}

View File

@ -18,8 +18,8 @@ package org.apache.lucene.util;
*/
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
@ -35,18 +35,16 @@ public final class CodecUtil {
private final static int CODEC_MAGIC = 0x3fd76c17;
public static IndexOutput writeHeader(IndexOutput out, String codec, int version)
public static DataOutput writeHeader(DataOutput out, String codec, int version)
throws IOException {
final long start = out.getFilePointer();
BytesRef bytes = new BytesRef(codec);
if (bytes.length != codec.length() || bytes.length >= 128) {
throw new IllegalArgumentException("codec must be simple ASCII, less than 128 characters in length [got " + codec + "]");
}
out.writeInt(CODEC_MAGIC);
out.writeString(codec);
out.writeInt(version);
// We require this so we can easily pre-compute header length
if (out.getFilePointer()-start != codec.length()+9) {
throw new IllegalArgumentException("codec must be simple ASCII, less than 128 characters in length [got " + codec + "]");
}
return out;
}
@ -54,7 +52,7 @@ public final class CodecUtil {
return 9+codec.length();
}
public static int checkHeader(IndexInput in, String codec, int minVersion, int maxVersion)
public static int checkHeader(DataInput in, String codec, int minVersion, int maxVersion)
throws IOException {
// Safety to guard against reading a bogus string:
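The fixed overhead behind headerLength() is 9 bytes: the ASCII/length check above guarantees writeString's VInt length prefix fits in a single byte. A worked example, assuming the codec name "Standard":

  // writeHeader(out, "Standard", version) emits:
  //   CODEC_MAGIC          4 bytes
  //   VInt string length   1 byte   (always 1 while length < 128)
  //   "Standard" bytes     8 bytes  (ASCII, so bytes == chars)
  //   version              4 bytes
  // total = 8 + 9 = 17 = headerLength("Standard")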

View File

@ -180,7 +180,13 @@ public class Builder<T> {
compileAllTargets(node);
}
final T nextFinalOutput = node.output;
final boolean isFinal = node.isFinal;
// We "fake" the node as being final if it has no
// outgoing arcs; in theory we could leave it
// as non-final (the FST can represent this), but
// FSTEnum, Util, etc., have trouble w/ non-final
// dead-end states:
final boolean isFinal = node.isFinal || node.numArcs == 0;
if (doCompile) {
// this node makes it and we now compile it. first,
@ -219,7 +225,7 @@ public class Builder<T> {
add(scratchIntsRef, output);
}
/** Sugar: adds the UTF32 chars from char[] slice. FST
/** Sugar: adds the UTF32 codepoints from char[] slice. FST
* must be FST.INPUT_TYPE.BYTE4! */
public void add(char[] s, int offset, int length, T output) throws IOException {
assert fst.getInputType() == FST.INPUT_TYPE.BYTE4;
@ -237,7 +243,7 @@ public class Builder<T> {
add(scratchIntsRef, output);
}
/** Sugar: adds the UTF32 chars from CharSequence. FST
/** Sugar: adds the UTF32 codepoints from CharSequence. FST
* must be FST.INPUT_TYPE.BYTE4! */
public void add(CharSequence s, T output) throws IOException {
assert fst.getInputType() == FST.INPUT_TYPE.BYTE4;
@ -268,6 +274,7 @@ public class Builder<T> {
// 'finalness' is stored on the incoming arc, not on
// the node
frontier[0].inputCount++;
frontier[0].isFinal = true;
fst.setEmptyOutput(output);
return;
}
@ -388,6 +395,10 @@ public class Builder<T> {
if (!arc.target.isCompiled()) {
// not yet compiled
@SuppressWarnings("unchecked") final UnCompiledNode<T> n = (UnCompiledNode<T>) arc.target;
if (n.numArcs == 0) {
//System.out.println("seg=" + segment + " FORCE final arc=" + (char) arc.label);
arc.isFinal = n.isFinal = true;
}
arc.target = compileNode(n);
}
}
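The NOTE added to FST.java below and the two forced-final branches here apply one rule; restated as a standalone sketch (not Lucene API):

  // A node with no outgoing arcs is a dead end: no further input can pass
  // through it, so the only useful thing it can do is accept. The builder
  // therefore compiles such nodes as final.
  static boolean compileAsFinal(boolean isFinal, int numArcs) {
    return isFinal || numArcs == 0;
  }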

View File

@ -21,12 +21,14 @@ import java.io.IOException;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.CodecUtil;
import org.apache.lucene.util.automaton.fst.Builder.UnCompiledNode;
// NOTE: while the FST is able to represent a non-final
// dead-end state (NON_FINAL_END_NODE=0), the layers above
// (FSTEnum, Util) have problems with this!!
/** Represents an FST using a compact byte[] format.
* <p> The format is similar to what's used by Morfologik
* (http://sourceforge.net/projects/morfologik).
@ -168,7 +170,7 @@ public class FST<T> {
}
// create an existing FST
public FST(IndexInput in, Outputs<T> outputs) throws IOException {
public FST(DataInput in, Outputs<T> outputs) throws IOException {
this.outputs = outputs;
writer = null;
CodecUtil.checkHeader(in, FILE_FORMAT_NAME, VERSION_START, VERSION_START);
@ -216,6 +218,9 @@ public class FST<T> {
}
void finish(int startNode) {
if (startNode == FINAL_END_NODE && emptyOutput != null) {
startNode = 0;
}
if (this.startNode != -1) {
throw new IllegalStateException("already finished");
}
@ -250,11 +255,13 @@ public class FST<T> {
writer.posWrite = posSave;
}
public void save(IndexOutput out) throws IOException {
public void save(DataOutput out) throws IOException {
if (startNode == -1) {
throw new IllegalStateException("call finish first");
}
CodecUtil.writeHeader(out, FILE_FORMAT_NAME, VERSION_CURRENT);
// TODO: really we should encode this as an arc, arriving
// to the root node, instead of special casing here:
if (emptyOutput != null) {
out.writeByte((byte) 1);
out.writeVInt(emptyOutputBytes.length);
@ -468,7 +475,9 @@ public class FST<T> {
arc.nextFinalOutput = emptyOutput;
} else {
arc.flags = BIT_LAST_ARC;
arc.nextFinalOutput = NO_OUTPUT;
}
arc.output = NO_OUTPUT;
// If there are no nodes, ie, the FST only accepts the
// empty string, then startNode is 0, and then readFirstTargetArc
@ -585,12 +594,11 @@ public class FST<T> {
* expanded array format.
*/
boolean isExpandedTarget(Arc<T> follow) throws IOException {
if (follow.isFinal()) {
if (!targetHasArcs(follow)) {
return false;
} else {
final BytesReader in = getBytesReader(follow.target);
final byte b = in.readByte();
return (b & BIT_ARCS_AS_FIXED_ARRAY) != 0;
}
}
@ -669,8 +677,11 @@ public class FST<T> {
}
if (arc.flag(BIT_STOP_NODE)) {
arc.target = FINAL_END_NODE;
arc.flags |= BIT_FINAL_ARC;
if (arc.flag(BIT_FINAL_ARC)) {
arc.target = FINAL_END_NODE;
} else {
arc.target = NON_FINAL_END_NODE;
}
arc.nextArc = in.pos;
} else if (arc.flag(BIT_TARGET_NEXT)) {
arc.nextArc = in.pos;

View File

@ -19,10 +19,10 @@ package org.apache.lucene.analysis;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
/**
@ -33,58 +33,50 @@ public final class MockAnalyzer extends Analyzer {
private final boolean lowerCase;
private final CharacterRunAutomaton filter;
private final boolean enablePositionIncrements;
private final boolean payload;
private int positionIncrementGap;
public MockAnalyzer(CharacterRunAutomaton runAutomaton, boolean lowerCase, CharacterRunAutomaton filter, boolean enablePositionIncrements) {
this(runAutomaton, lowerCase, filter, enablePositionIncrements, true);
}
private final Random random;
private Map<String,Integer> previousMappings = new HashMap<String,Integer>();
/**
* Creates a new MockAnalyzer.
*
* @param random Random for payloads behavior
* @param runAutomaton DFA describing how tokenization should happen (e.g. [a-zA-Z]+)
* @param lowerCase true if the tokenizer should lowercase terms
* @param filter DFA describing how terms should be filtered (set of stopwords, etc)
* @param enablePositionIncrements true if position increments should reflect filtered terms.
* @param payload if payloads should be added
*/
public MockAnalyzer(CharacterRunAutomaton runAutomaton, boolean lowerCase, CharacterRunAutomaton filter, boolean enablePositionIncrements, boolean payload) {
public MockAnalyzer(Random random, CharacterRunAutomaton runAutomaton, boolean lowerCase, CharacterRunAutomaton filter, boolean enablePositionIncrements) {
this.random = random;
this.runAutomaton = runAutomaton;
this.lowerCase = lowerCase;
this.filter = filter;
this.enablePositionIncrements = enablePositionIncrements;
this.payload = payload;
}
/**
* Creates a new MockAnalyzer, with no filtering.
*
* @param runAutomaton DFA describing how tokenization should happen (e.g. [a-zA-Z]+)
* @param lowerCase true if the tokenizer should lowercase terms
* Calls {@link #MockAnalyzer(Random, CharacterRunAutomaton, boolean, CharacterRunAutomaton, boolean)
* MockAnalyzer(random, runAutomaton, lowerCase, MockTokenFilter.EMPTY_STOPSET, false}).
*/
public MockAnalyzer(CharacterRunAutomaton runAutomaton, boolean lowerCase) {
this(runAutomaton, lowerCase, MockTokenFilter.EMPTY_STOPSET, false, true);
public MockAnalyzer(Random random, CharacterRunAutomaton runAutomaton, boolean lowerCase) {
this(random, runAutomaton, lowerCase, MockTokenFilter.EMPTY_STOPSET, false);
}
public MockAnalyzer(CharacterRunAutomaton runAutomaton, boolean lowerCase, boolean payload) {
this(runAutomaton, lowerCase, MockTokenFilter.EMPTY_STOPSET, false, payload);
}
/**
* Create a Whitespace-lowercasing analyzer with no stopwords removal
* Create a Whitespace-lowercasing analyzer with no stopwords removal.
* <p>
* Calls {@link #MockAnalyzer(Random, CharacterRunAutomaton, boolean, CharacterRunAutomaton, boolean)
* MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.EMPTY_STOPSET, false}).
*/
public MockAnalyzer() {
this(MockTokenizer.WHITESPACE, true);
public MockAnalyzer(Random random) {
this(random, MockTokenizer.WHITESPACE, true);
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
MockTokenizer tokenizer = new MockTokenizer(reader, runAutomaton, lowerCase);
TokenFilter filt = new MockTokenFilter(tokenizer, filter, enablePositionIncrements);
if (payload){
filt = new SimplePayloadFilter(filt, fieldName);
}
filt = maybePayload(filt, fieldName);
return filt;
}
@ -96,15 +88,19 @@ public final class MockAnalyzer extends Analyzer {
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
throws IOException {
SavedStreams saved = (SavedStreams) getPreviousTokenStream();
Map<String,SavedStreams> map = (Map) getPreviousTokenStream();
if (map == null) {
map = new HashMap<String,SavedStreams>();
setPreviousTokenStream(map);
}
SavedStreams saved = map.get(fieldName);
if (saved == null) {
saved = new SavedStreams();
saved.tokenizer = new MockTokenizer(reader, runAutomaton, lowerCase);
saved.filter = new MockTokenFilter(saved.tokenizer, filter, enablePositionIncrements);
if (payload){
saved.filter = new SimplePayloadFilter(saved.filter, fieldName);
}
setPreviousTokenStream(saved);
saved.filter = maybePayload(saved.filter, fieldName);
map.put(fieldName, saved);
return saved.filter;
} else {
saved.tokenizer.reset(reader);
@ -113,6 +109,28 @@ public final class MockAnalyzer extends Analyzer {
}
}
private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) {
Integer val = previousMappings.get(fieldName);
if (val == null) {
switch(random.nextInt(3)) {
case 0: val = -1; // no payloads
break;
case 1: val = Integer.MAX_VALUE; // variable length payload
break;
case 2: val = random.nextInt(12); // fixed length payload
break;
}
previousMappings.put(fieldName, val); // save it so we are consistent for this field
}
if (val == -1)
return stream;
else if (val == Integer.MAX_VALUE)
return new MockVariableLengthPayloadFilter(random, stream);
else
return new MockFixedLengthPayloadFilter(random, stream, val);
}
public void setPositionIncrementGap(int positionIncrementGap){
this.positionIncrementGap = positionIncrementGap;
}
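Because maybePayload() memoizes its per-field choice in previousMappings, payload behavior is random across fields but stable for the life of one analyzer. A usage sketch (java.io.StringReader assumed):

  MockAnalyzer a = new MockAnalyzer(random);
  // both "body" streams draw the same payload policy (none, fixed
  // length, or variable length); "title" may draw a different one
  TokenStream body1 = a.tokenStream("body", new StringReader("hello world"));
  TokenStream body2 = a.tokenStream("body", new StringReader("hello again"));
  TokenStream title = a.tokenStream("title", new StringReader("a title"));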
@ -122,35 +140,3 @@ public final class MockAnalyzer extends Analyzer {
return positionIncrementGap;
}
}
final class SimplePayloadFilter extends TokenFilter {
String fieldName;
int pos;
final PayloadAttribute payloadAttr;
final CharTermAttribute termAttr;
public SimplePayloadFilter(TokenStream input, String fieldName) {
super(input);
this.fieldName = fieldName;
pos = 0;
payloadAttr = input.addAttribute(PayloadAttribute.class);
termAttr = input.addAttribute(CharTermAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
payloadAttr.setPayload(new Payload(("pos: " + pos).getBytes()));
pos++;
return true;
} else {
return false;
}
}
@Override
public void reset() throws IOException {
super.reset();
pos = 0;
}
}

View File

@ -0,0 +1,49 @@
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Random;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
public final class MockFixedLengthPayloadFilter extends TokenFilter {
private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
private final Random random;
private final byte[] bytes;
private final Payload payload;
public MockFixedLengthPayloadFilter(Random random, TokenStream in, int length) {
super(in);
this.random = random;
this.bytes = new byte[length];
this.payload = new Payload(bytes);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
random.nextBytes(bytes);
payloadAtt.setPayload(payload);
return true;
} else {
return false;
}
}
}
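A wiring sketch for the new filter, built only from classes shown in this commit (java.io.StringReader and java.util.Random assumed):

  // attach a fixed 4-byte random payload to every token of a whitespace stream
  TokenStream ts = new MockTokenizer(new StringReader("a b c"),
                                     MockTokenizer.WHITESPACE, false);
  ts = new MockFixedLengthPayloadFilter(new Random(42), ts, 4);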

View File

@ -0,0 +1,51 @@
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Random;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
public final class MockVariableLengthPayloadFilter extends TokenFilter {
private static final int MAXLENGTH = 129;
private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
private final Random random;
private final byte[] bytes = new byte[MAXLENGTH];
private final Payload payload;
public MockVariableLengthPayloadFilter(Random random, TokenStream in) {
super(in);
this.random = random;
this.payload = new Payload(bytes);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
random.nextBytes(bytes);
payload.setData(bytes, 0, random.nextInt(MAXLENGTH));
payloadAtt.setPayload(payload);
return true;
} else {
return false;
}
}
}

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
@ -30,6 +31,8 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
class DocHelper {
@ -218,9 +221,9 @@ class DocHelper {
* @param doc
* @throws IOException
*/
public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException
public static SegmentInfo writeDoc(Random random, Directory dir, Document doc) throws IOException
{
return writeDoc(dir, new MockAnalyzer(MockTokenizer.WHITESPACE, false), null, doc);
return writeDoc(random, dir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), null, doc);
}
/**
@ -233,8 +236,8 @@ class DocHelper {
* @param doc
* @throws IOException
*/
public static SegmentInfo writeDoc(Directory dir, Analyzer analyzer, SimilarityProvider similarity, Document doc) throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
public static SegmentInfo writeDoc(Random random, Directory dir, Analyzer analyzer, SimilarityProvider similarity, Document doc) throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( /* LuceneTestCase.newIndexWriterConfig(random, */
TEST_VERSION_CURRENT, analyzer).setSimilarityProvider(similarity));
//writer.setUseCompoundFile(false);
writer.addDocument(doc);

View File

@ -42,6 +42,7 @@ public class RandomIndexWriter implements Closeable {
private final Random r;
int docCount;
int flushAt;
private double flushAtFactor = 1.0;
private boolean getReaderCalled;
// Randomly calls Thread.yield so we mixup thread scheduling
@ -67,7 +68,7 @@ public class RandomIndexWriter implements Closeable {
/** create a RandomIndexWriter with a random config: Uses TEST_VERSION_CURRENT and MockAnalyzer */
public RandomIndexWriter(Random r, Directory dir) throws IOException {
this(r, dir, LuceneTestCase.newIndexWriterConfig(r, LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer()));
this(r, dir, LuceneTestCase.newIndexWriterConfig(r, LuceneTestCase.TEST_VERSION_CURRENT, new MockAnalyzer(r)));
}
/** create a RandomIndexWriter with a random config: Uses TEST_VERSION_CURRENT */
@ -94,15 +95,32 @@ public class RandomIndexWriter implements Closeable {
public void addDocument(Document doc) throws IOException {
w.addDocument(doc);
maybeCommit();
}
private void maybeCommit() throws IOException {
if (docCount++ == flushAt) {
if (LuceneTestCase.VERBOSE) {
System.out.println("RIW.addDocument: now doing a commit");
System.out.println("RIW.add/updateDocument: now doing a commit at docCount=" + docCount);
}
w.commit();
flushAt += _TestUtil.nextInt(r, 10, 1000);
flushAt += _TestUtil.nextInt(r, (int) (flushAtFactor * 10), (int) (flushAtFactor * 1000));
if (flushAtFactor < 2e6) {
// gradually but exponentially increase time b/w flushes
flushAtFactor *= 1.05;
}
}
}
/**
* Updates a document.
* @see IndexWriter#updateDocument(Term, Document)
*/
public void updateDocument(Term t, Document doc) throws IOException {
w.updateDocument(t, doc);
maybeCommit();
}
public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
w.addIndexes(dirs);
}
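The effect of flushAtFactor on commit cadence, simulated standalone (a sketch using the patch's constants; only the lower bound of the random draw is shown):

  double flushAtFactor = 1.0;
  int flushAt = 10; // first commit no earlier than 10 docs in
  for (int commit = 1; commit <= 10; commit++) {
    // the next commit lands between 10*factor and 1000*factor docs later
    flushAt += (int) (flushAtFactor * 10);
    if (flushAtFactor < 2e6) {
      flushAtFactor *= 1.05; // gradually back off commit frequency
    }
    System.out.println("commit " + commit + " no earlier than doc " + flushAt);
  }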

View File

@ -120,7 +120,14 @@ public class MockRandomCodec extends Codec {
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
// we pull this before the seed intentionally: because it's not consumed at runtime
// (the skipInterval is written into postings header)
int skipInterval = _TestUtil.nextInt(seedRandom, 2, 64);
if (LuceneTestCase.VERBOSE) {
System.out.println("MockRandomCodec: skipInterval=" + skipInterval);
}
final long seed = seedRandom.nextLong();
if (LuceneTestCase.VERBOSE) {
@ -136,12 +143,12 @@ public class MockRandomCodec extends Codec {
PostingsWriterBase postingsWriter;
if (random.nextBoolean()) {
postingsWriter = new SepPostingsWriterImpl(state, new MockIntStreamFactory(random));
postingsWriter = new SepPostingsWriterImpl(state, new MockIntStreamFactory(random), skipInterval);
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println("MockRandomCodec: writing Standard postings");
}
postingsWriter = new StandardPostingsWriter(state);
postingsWriter = new StandardPostingsWriter(state, skipInterval);
}
if (random.nextBoolean()) {

View File

@ -166,7 +166,7 @@ public class QueryUtils {
throws IOException {
Directory d = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter w = new IndexWriter(d, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < numDeletedDocs; i++) {
w.addDocument(new Document());
}

View File

@ -415,6 +415,9 @@ public class MockDirectoryWrapper extends Directory {
}
open = false;
if (checkIndexOnClose) {
if (LuceneTestCase.VERBOSE) {
System.out.println("\nNOTE: MockDirectoryWrapper: now run CheckIndex");
}
if (codecProvider != null) {
if (IndexReader.indexExists(this, codecProvider)) {
_TestUtil.checkIndex(this, codecProvider);

View File

@ -24,7 +24,6 @@ import java.io.IOException;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.InputStream;
import java.io.BufferedInputStream;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.GZIPInputStream;
import java.util.Random;
@ -79,8 +78,7 @@ public class LineFileDocs implements Closeable {
size *= 2.8;
}
final InputStream in = new BufferedInputStream(is, BUFFER_SIZE);
reader = new BufferedReader(new InputStreamReader(in, "UTF-8"), BUFFER_SIZE);
reader = new BufferedReader(new InputStreamReader(is, "UTF-8"), BUFFER_SIZE);
// Override sizes for currently "known" line files:
if (path.equals("europarl.lines.txt.gz")) {

View File

@ -43,13 +43,13 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestDemo extends LuceneTestCase {
public void testDemo() throws IOException, ParseException {
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
// Store the index in memory:
Directory directory = newDirectory();
// To store an index on disk, use this instead:
//Directory directory = FSDirectory.open("/tmp/testindex");
RandomIndexWriter iwriter = new RandomIndexWriter(random, directory);
RandomIndexWriter iwriter = new RandomIndexWriter(random, directory, analyzer);
iwriter.w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";

View File

@ -509,7 +509,7 @@ public class TestExternalCodecs extends LuceneTestCase {
dir.setCheckIndexOnClose(false); // we use a custom codec provider
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, true)).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setCodecProvider(provider).
setMergePolicy(newLogMergePolicy(3))
);

View File

@ -90,7 +90,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
doc.add(idField);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergeScheduler(new MyMergeScheduler())
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new MyMergeScheduler())
.setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setMergePolicy(newLogMergePolicy()));
LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();

View File

@ -72,7 +72,7 @@ public class TestSearch extends LuceneTestCase {
private void doTestSearch(Random random, PrintWriter out, boolean useCompoundFile)
throws Exception {
Directory directory = newDirectory();
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
MergePolicy mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {

View File

@ -78,7 +78,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
private void doTest(Random random, PrintWriter out, boolean useCompoundFiles) throws Exception {
Directory directory = newDirectory();
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
final MergePolicy mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {

View File

@ -29,7 +29,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** Test a configuration that behaves a lot like WhitespaceAnalyzer */
public void testWhitespace() throws Exception {
Analyzer a = new MockAnalyzer();
Analyzer a = new MockAnalyzer(random);
assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ",
new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
assertAnalyzesToReuse(a, "aba cadaba shazam",
@ -40,7 +40,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** Test a configuration that behaves a lot like SimpleAnalyzer */
public void testSimple() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
@ -51,7 +51,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** Test a configuration that behaves a lot like KeywordAnalyzer */
public void testKeyword() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.KEYWORD, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
new String[] { "a-bc123 defg+hijklmn567opqrstuv78wxy_z " });
assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
@ -62,13 +62,13 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** Test a configuration that behaves a lot like StopAnalyzer */
public void testStop() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
assertAnalyzesTo(a, "the quick brown a fox",
new String[] { "quick", "brown", "fox" },
new int[] { 2, 1, 2 });
// disable positions
a = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
assertAnalyzesTo(a, "the quick brown a fox",
new String[] { "quick", "brown", "fox" },
new int[] { 1, 1, 1 });
@ -81,7 +81,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
BasicOperations.complement(
Automaton.union(
Arrays.asList(BasicAutomata.makeString("foo"), BasicAutomata.makeString("bar")))));
Analyzer a = new MockAnalyzer(MockTokenizer.SIMPLE, true, keepWords, true);
Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, keepWords, true);
assertAnalyzesTo(a, "quick foo brown bar bar fox foo",
new String[] { "foo", "bar", "bar", "foo" },
new int[] { 2, 2, 1, 2 });
@ -90,7 +90,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
/** Test a configuration that behaves a lot like LengthFilter */
public void testLength() throws Exception {
CharacterRunAutomaton length5 = new CharacterRunAutomaton(new RegExp(".{5,}").toAutomaton());
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, true, length5, true);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, length5, true);
assertAnalyzesTo(a, "ok toolong fine notfine",
new String[] { "ok", "fine" },
new int[] { 1, 2 });
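
Every MockAnalyzer configuration now leads with the Random; the trailing arguments keep their old meanings. A sketch of the four constructor shapes exercised above, inside a LuceneTestCase subclass:

Analyzer whitespaceLike = new MockAnalyzer(random); // WhitespaceAnalyzer-like default
Analyzer simpleLike = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); // lowercasing
Analyzer keywordLike = new MockAnalyzer(random, MockTokenizer.KEYWORD, false); // single token
Analyzer stopLike = new MockAnalyzer(random, MockTokenizer.SIMPLE, true,
    MockTokenFilter.ENGLISH_STOPSET, true); // final flag: keep position increments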

View File

@ -19,11 +19,17 @@ package org.apache.lucene.index;
import org.apache.lucene.util.*;
import org.apache.lucene.store.*;
import org.apache.lucene.search.*;
import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.tokenattributes.*;
import org.apache.lucene.document.*;
import org.apache.lucene.index.codecs.CodecProvider;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import org.junit.Ignore;
// NOTE: this test will fail w/ PreFlexRW codec! (Because
@ -36,7 +42,7 @@ import org.junit.Ignore;
//
// ant compile-test
//
// java -server -Xmx2g -Xms2g -d64 -cp .:lib/junit-4.7.jar:./build/classes/test:./build/classes/java -Dlucene.version=4.0-dev -Dtests.directory=SimpleFSDirectory -Dtests.codec=Standard -DtempDir=build -ea org.junit.runner.JUnitCore org.apache.lucene.index.Test2BTerms
// java -server -Xmx8g -d64 -cp .:lib/junit-4.7.jar:./build/classes/test:./build/classes/test-framework:./build/classes/java -Dlucene.version=4.0-dev -Dtests.directory=MMapDirectory -DtempDir=build -ea org.junit.runner.JUnitCore org.apache.lucene.index.Test2BTerms
//
public class Test2BTerms extends LuceneTestCase {
@ -45,17 +51,21 @@ public class Test2BTerms extends LuceneTestCase {
private final static BytesRef bytes = new BytesRef(TOKEN_LEN);
private static final class MyTokenStream extends TokenStream {
private final static class MyTokenStream extends TokenStream {
private final int tokensPerDoc;
private int tokenCount;
private int byteUpto;
public final List<BytesRef> savedTerms = new ArrayList<BytesRef>();
private int nextSave;
private final Random random;
public MyTokenStream(int tokensPerDoc) {
public MyTokenStream(Random random, int tokensPerDoc) {
super(new MyAttributeFactory(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY));
this.tokensPerDoc = tokensPerDoc;
addAttribute(TermToBytesRefAttribute.class);
bytes.length = TOKEN_LEN;
this.random = random;
nextSave = _TestUtil.nextInt(random, 500000, 1000000);
}
@Override
@ -65,6 +75,10 @@ public class Test2BTerms extends LuceneTestCase {
}
random.nextBytes(bytes.bytes);
tokenCount++;
if (--nextSave == 0) {
savedTerms.add(new BytesRef(bytes));
nextSave = _TestUtil.nextInt(random, 500000, 1000000);
}
return true;
}
@ -131,47 +145,104 @@ public class Test2BTerms extends LuceneTestCase {
throw new RuntimeException("thist test cannot run with PreFlex codec");
}
long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000;
final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000;
int TERMS_PER_DOC = 1000000;
final int TERMS_PER_DOC = _TestUtil.nextInt(random, 100000, 1000000);
List<BytesRef> savedTerms = null;
Directory dir = newFSDirectory(_TestUtil.getTempDir("2BTerms"));
IndexWriter w = new IndexWriter(
dir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).
setRAMBufferSizeMB(256.0).
setMergeScheduler(new ConcurrentMergeScheduler()).
setMergePolicy(newLogMergePolicy(false, 10))
);
//Directory dir = newFSDirectory(new File("/p/lucene/indices/2bindex"));
if (true) {
IndexWriter w = new IndexWriter(dir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
.setRAMBufferSizeMB(256.0)
.setMergeScheduler(new ConcurrentMergeScheduler())
.setMergePolicy(newLogMergePolicy(false, 10)));
MergePolicy mp = w.getConfig().getMergePolicy();
if (mp instanceof LogByteSizeMergePolicy) {
// 1 petabyte:
((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
MergePolicy mp = w.getConfig().getMergePolicy();
if (mp instanceof LogByteSizeMergePolicy) {
// 1 petabyte:
((LogByteSizeMergePolicy) mp).setMaxMergeMB(1024*1024*1024);
}
Document doc = new Document();
final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC);
Field field = new Field("field", ts);
field.setOmitTermFreqAndPositions(true);
field.setOmitNorms(true);
doc.add(field);
//w.setInfoStream(System.out);
final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC);
System.out.println("TERMS_PER_DOC=" + TERMS_PER_DOC);
System.out.println("numDocs=" + numDocs);
for(int i=0;i<numDocs;i++) {
final long t0 = System.currentTimeMillis();
w.addDocument(doc);
System.out.println(i + " of " + numDocs + " " + (System.currentTimeMillis()-t0) + " msec");
}
savedTerms = ts.savedTerms;
System.out.println("TEST: optimize");
w.optimize();
System.out.println("TEST: close writer");
w.close();
}
Document doc = new Document();
Field field = new Field("field", new MyTokenStream(TERMS_PER_DOC));
field.setOmitTermFreqAndPositions(true);
field.setOmitNorms(true);
doc.add(field);
//w.setInfoStream(System.out);
final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC);
for(int i=0;i<numDocs;i++) {
final long t0 = System.currentTimeMillis();
w.addDocument(doc);
System.out.println(i + " of " + numDocs + " " + (System.currentTimeMillis()-t0) + " msec");
System.out.println("TEST: open reader");
final IndexReader r = IndexReader.open(dir);
if (savedTerms == null) {
savedTerms = findTerms(r);
}
System.out.println("now optimize...");
w.optimize();
w.close();
final int numSavedTerms = savedTerms.size();
final List<BytesRef> bigOrdTerms = new ArrayList<BytesRef>(savedTerms.subList(numSavedTerms-10, numSavedTerms));
System.out.println("TEST: test big ord terms...");
testSavedTerms(r, bigOrdTerms);
System.out.println("TEST: test all saved terms...");
testSavedTerms(r, savedTerms);
r.close();
System.out.println("now CheckIndex...");
System.out.println("TEST: now CheckIndex...");
CheckIndex.Status status = _TestUtil.checkIndex(dir);
final long tc = status.segmentInfos.get(0).termIndexStatus.termCount;
assertTrue("count " + tc + " is not > " + Integer.MAX_VALUE, tc > Integer.MAX_VALUE);
dir.close();
}
private List<BytesRef> findTerms(IndexReader r) throws IOException {
System.out.println("TEST: findTerms");
final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
final List<BytesRef> savedTerms = new ArrayList<BytesRef>();
int nextSave = _TestUtil.nextInt(random, 500000, 1000000);
BytesRef term;
while((term = termsEnum.next()) != null) {
if (--nextSave == 0) {
savedTerms.add(new BytesRef(term));
System.out.println("TEST: add " + term);
nextSave = _TestUtil.nextInt(random, 500000, 1000000);
}
}
return savedTerms;
}
private void testSavedTerms(IndexReader r, List<BytesRef> terms) throws IOException {
System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
IndexSearcher s = new IndexSearcher(r);
Collections.shuffle(terms);
TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
for(int iter=0;iter<10*terms.size();iter++) {
final BytesRef term = terms.get(random.nextInt(terms.size()));
System.out.println("TEST: search " + term);
final long t0 = System.currentTimeMillis();
assertTrue(s.search(new TermQuery(new Term("field", term)), 1).totalHits > 0);
final long t1 = System.currentTimeMillis();
System.out.println(" took " + (t1-t0) + " millis");
assertEquals(TermsEnum.SeekStatus.FOUND, termsEnum.seek(term));
}
}
}
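
Net effect of the Test2BTerms rewrite: terms are sampled while indexing (or recovered from an existing index via findTerms), then every sample must be reachable both through search and through TermsEnum.seek. A condensed sketch of that check, where r is an open IndexReader and savedTerms the sampled list:

IndexSearcher s = new IndexSearcher(r);
TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
for (BytesRef term : savedTerms) {
  // each sampled term must be found by search and by a direct enum seek
  assertTrue(s.search(new TermQuery(new Term("field", term)), 1).totalHits > 0);
  assertEquals(TermsEnum.SeekStatus.FOUND, termsEnum.seek(term));
}
s.close();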

View File

@ -53,7 +53,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = null;
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer())
new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE));
writer.setInfoStream(VERBOSE ? System.out : null);
// add 100 documents
@ -64,7 +64,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMergePolicy(newLogMergePolicy(false))
);
@ -73,14 +73,14 @@ public class TestAddIndexes extends LuceneTestCase {
assertEquals(40, writer.maxDoc());
writer.close();
writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
// add 50 documents in compound files
addDocs2(writer, 50);
assertEquals(50, writer.maxDoc());
writer.close();
// test doc count before segments are merged
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
assertEquals(100, writer.maxDoc());
writer.addIndexes(aux, aux2);
assertEquals(190, writer.maxDoc());
@ -95,14 +95,14 @@ public class TestAddIndexes extends LuceneTestCase {
// now add another set in.
Directory aux3 = newDirectory();
writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
// add 40 documents
addDocs(writer, 40);
assertEquals(40, writer.maxDoc());
writer.close();
// test doc count before segments are merged/index is optimized
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
assertEquals(190, writer.maxDoc());
writer.addIndexes(aux3);
assertEquals(230, writer.maxDoc());
@ -116,7 +116,7 @@ public class TestAddIndexes extends LuceneTestCase {
verifyTermDocs(dir, new Term("content", "bbb"), 50);
// now optimize it.
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
@ -129,11 +129,11 @@ public class TestAddIndexes extends LuceneTestCase {
// now add a single document
Directory aux4 = newDirectory();
writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocs2(writer, 1);
writer.close();
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
assertEquals(230, writer.maxDoc());
writer.addIndexes(aux4);
assertEquals(231, writer.maxDoc());
@ -156,7 +156,7 @@ public class TestAddIndexes extends LuceneTestCase {
Directory aux = newDirectory();
setUpDirs(dir, aux);
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.setInfoStream(VERBOSE ? System.out : null);
writer.addIndexes(aux);
@ -194,7 +194,7 @@ public class TestAddIndexes extends LuceneTestCase {
Directory aux = newDirectory();
setUpDirs(dir, aux);
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
// Adds 10 docs, then replaces them with another 10
// docs, so 10 pending deletes:
@ -232,7 +232,7 @@ public class TestAddIndexes extends LuceneTestCase {
Directory aux = newDirectory();
setUpDirs(dir, aux);
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
// Adds 10 docs, then replaces them with another 10
// docs, so 10 pending deletes:
@ -273,7 +273,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = null;
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
// add 100 documents
addDocs(writer, 100);
assertEquals(100, writer.maxDoc());
@ -281,7 +281,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(1000).
setMergePolicy(newLogMergePolicy(false))
@ -291,7 +291,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer.close();
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(1000).
setMergePolicy(newLogMergePolicy(false))
@ -299,7 +299,7 @@ public class TestAddIndexes extends LuceneTestCase {
addDocs(writer, 100);
writer.close();
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
try {
// cannot add self
writer.addIndexes(aux, dir);
@ -329,7 +329,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(4))
@ -358,7 +358,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(9).
setMergePolicy(newLogMergePolicy(4))
@ -387,7 +387,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(4))
@ -422,7 +422,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(4).
setMergePolicy(newLogMergePolicy(4))
@ -448,7 +448,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = newWriter(
aux2,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(100).
setMergePolicy(newLogMergePolicy(10))
@ -475,7 +475,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(6).
setMergePolicy(newLogMergePolicy(4))
@ -536,7 +536,7 @@ public class TestAddIndexes extends LuceneTestCase {
private void setUpDirs(Directory dir, Directory aux) throws IOException {
IndexWriter writer = null;
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
// add 1000 documents in 1 segment
addDocs(writer, 1000);
assertEquals(1000, writer.maxDoc());
@ -545,7 +545,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMaxBufferedDocs(1000).
setMergePolicy(newLogMergePolicy(false, 10))
@ -556,7 +556,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer.close();
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMaxBufferedDocs(1000).
setMergePolicy(newLogMergePolicy(false, 10))
@ -575,7 +575,7 @@ public class TestAddIndexes extends LuceneTestCase {
lmp.setUseCompoundFile(false);
lmp.setMergeFactor(100);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(5).setMergePolicy(lmp));
Document doc = new Document();
@ -603,7 +603,7 @@ public class TestAddIndexes extends LuceneTestCase {
lmp.setUseCompoundFile(false);
lmp.setMergeFactor(4);
writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer())
new MockAnalyzer(random))
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
writer.addIndexes(dir);
writer.close();
@ -636,14 +636,14 @@ public class TestAddIndexes extends LuceneTestCase {
NUM_COPY = numCopy;
dir = new MockDirectoryWrapper(random, new RAMDirectory());
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
dir2 = newDirectory();
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer2.setInfoStream(VERBOSE ? System.out : null);
writer2.commit();
@ -944,7 +944,7 @@ public class TestAddIndexes extends LuceneTestCase {
Directory[] dirs = new Directory[2];
for (int i = 0; i < dirs.length; i++) {
dirs[i] = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
IndexWriter writer = new IndexWriter(dirs[i], conf);
Document doc = new Document();
doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
@ -952,7 +952,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer.close();
}
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
IndexWriter writer = new IndexWriter(dirs[0], conf);
// Now delete the document
@ -992,7 +992,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexWriter writer = null;
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setCodecProvider(
new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodecProvider(
provider));
// add 100 documents
addDocs3(writer, 100);
@ -1003,7 +1003,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
aux,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setCodecProvider(provider).
setMaxBufferedDocs(10).
@ -1017,7 +1017,7 @@ public class TestAddIndexes extends LuceneTestCase {
writer = newWriter(
aux2,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setCodecProvider(provider)
);
@ -1030,7 +1030,7 @@ public class TestAddIndexes extends LuceneTestCase {
// test doc count before segments are merged
writer = newWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setCodecProvider(provider)
);
@ -1063,7 +1063,7 @@ public class TestAddIndexes extends LuceneTestCase {
Directory[] dirs = new Directory[2];
for (int i = 0; i < dirs.length; i++) {
dirs[i] = new RAMDirectory();
IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Document d = new Document();
d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES));
w.addDocument(d);
@ -1073,7 +1073,7 @@ public class TestAddIndexes extends LuceneTestCase {
IndexReader[] readers = new IndexReader[] { IndexReader.open(dirs[0]), IndexReader.open(dirs[1]) };
Directory dir = new RAMDirectory();
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy());
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy());
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setUseCompoundFile(true);
lmp.setNoCFSRatio(1.0); // Force creation of CFS
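
The TestAddIndexes edits are mechanical reseedings, but they all exercise one pattern: open the destination writer in APPEND mode, addIndexes the auxiliary directories (doc counts simply add up), then optimize to merge. A sketch, inside a LuceneTestCase subclass with dir, aux and aux2 already populated as above:

IndexWriter writer = newWriter(dir, newIndexWriterConfig(
    TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.addIndexes(aux, aux2); // 100 + 40 + 50 docs -> maxDoc() == 190
writer.optimize();            // merge everything down to a single segment
writer.close();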

View File

@ -127,7 +127,7 @@ public class TestAtomicUpdate extends LuceneTestCase {
TimedThread[] threads = new TimedThread[4];
IndexWriterConfig conf = new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(7);
((TieredMergePolicy) conf.getMergePolicy()).setMaxMergeAtOnce(3);
IndexWriter writer = new MockIndexWriter(directory, conf);

View File

@ -132,7 +132,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
try {
writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
fail("IndexWriter creation should not pass for "+unsupportedNames[i]);
} catch (IndexFormatTooOldException e) {
// pass
@ -174,7 +174,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory dir = newFSDirectory(oldIndxeDir);
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.setInfoStream(VERBOSE ? System.out : null);
w.optimize();
w.close();
@ -194,7 +194,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory targetDir = newDirectory();
IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.addIndexes(dir);
w.close();
@ -215,7 +215,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory targetDir = newDirectory();
IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
w.addIndexes(reader);
w.close();
reader.close();
@ -268,7 +268,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
}
public void searchIndex(File indexDir, String oldName) throws IOException {
//QueryParser parser = new QueryParser("contents", new MockAnalyzer());
//QueryParser parser = new QueryParser("contents", new MockAnalyzer(random));
//Query query = parser.parse("handle:1");
Directory dir = newFSDirectory(indexDir);
@ -340,7 +340,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
Directory dir = newFSDirectory(oldIndexDir);
// open writer
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.setInfoStream(VERBOSE ? System.out : null);
// add 10 docs
for(int i=0;i<10;i++) {
@ -385,7 +385,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
searcher.close();
// optimize
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
@ -430,7 +430,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
searcher.close();
// optimize
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
writer.optimize();
writer.close();
@ -451,7 +451,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
_TestUtil.rmDir(indexDir);
Directory dir = newFSDirectory(indexDir);
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS);
IndexWriter writer = new IndexWriter(dir, conf);
@ -462,7 +462,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
// open fresh writer so we get no prx file in the added segment
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS);
writer = new IndexWriter(dir, conf);
addNoProxDoc(writer);
@ -498,7 +498,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMaxBufferedDocs(-1).
setRAMBufferSizeMB(16.0).
setMergePolicy(mergePolicy)
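
One behavioral detail these hunks preserve: opening an unsupported old index must fail with IndexFormatTooOldException. A sketch of the expectation, with dir opened over one of the unsupported old indexes:

try {
  new IndexWriter(dir, newIndexWriterConfig(
      TEST_VERSION_CURRENT, new MockAnalyzer(random)));
  fail("IndexWriter creation should not pass for an unsupported old index");
} catch (IndexFormatTooOldException e) {
  // expected: the index format predates what this version can read
}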

View File

@ -34,7 +34,7 @@ public class TestCheckIndex extends LuceneTestCase {
public void testDeletedDocs() throws IOException {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++) {

View File

@ -321,7 +321,7 @@ public class TestCodecs extends LuceneTestCase {
public void testSepPositionAfterMerge() throws IOException {
final Directory dir = newDirectory();
final IndexWriterConfig config = newIndexWriterConfig(Version.LUCENE_31,
new MockAnalyzer());
new MockAnalyzer(random));
config.setCodecProvider(new MockSepCodecs());
final IndexWriter writer = new IndexWriter(dir, config);

View File

@ -72,7 +72,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
FailOnlyOnFlush failure = new FailOnlyOnFlush();
directory.failOn(failure);
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
@ -130,7 +130,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
// start:
mp.setMinMergeDocs(1000);
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMergePolicy(mp));
writer.setInfoStream(VERBOSE ? System.out : null);
@ -169,7 +169,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
public void testNoExtraFiles() throws IOException {
MockDirectoryWrapper directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
@ -189,7 +189,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
// Reopen
writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
}
@ -207,7 +207,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
IndexWriter writer = new IndexWriter(
directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(100))
);
@ -240,7 +240,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
// Reopen
writer = new IndexWriter(
directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.APPEND).
setMergePolicy(newLogMergePolicy(100))
);

View File

@ -35,7 +35,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
public void testSameFieldNumbersAcrossSegments() throws Exception {
for (int i = 0; i < 2; i++) {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
Document d1 = new Document();
d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO));
@ -44,7 +44,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
if (i == 1) {
writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
} else {
writer.commit();
}
@ -72,7 +72,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
assertEquals("f3", fis2.fieldInfo(2).name);
assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.optimize();
writer.close();
@ -96,7 +96,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
public void testAddIndexes() throws Exception {
Directory dir1 = newDirectory();
Directory dir2 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
Document d1 = new Document();
d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO));
@ -104,7 +104,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
writer.addDocument(d1);
writer.close();
writer = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
writer = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
Document d2 = new Document();
d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO));
@ -115,7 +115,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
writer.close();
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
writer.addIndexes(dir2);
writer.close();
@ -134,7 +134,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
assertEquals("f3", fis2.fieldInfo(2).name);
assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
writer.optimize();
writer.close();
@ -159,7 +159,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
Directory dir = newDirectory();
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
NoMergePolicy.NO_COMPOUND_FILES));
Document d = new Document();
d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED,
@ -180,7 +180,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES
: NoMergePolicy.COMPOUND_FILES));
Document d = new Document();
@ -205,7 +205,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES
: NoMergePolicy.COMPOUND_FILES));
Document d = new Document();
@ -237,7 +237,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
{
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES
: NoMergePolicy.COMPOUND_FILES));
writer.deleteDocuments(new Term("f1", "d1"));
@ -248,7 +248,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
}
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
new LogByteSizeMergePolicy()));
writer.optimize();
assertFalse(" field numbers got mixed up", writer.anyNonBulkMerges);
@ -281,7 +281,7 @@ public class TestConsistentFieldNumbers extends LuceneTestCase {
}
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();

View File

@ -36,7 +36,7 @@ public class TestCrash extends LuceneTestCase {
private IndexWriter initIndex(Random random, MockDirectoryWrapper dir, boolean initialCommit) throws IOException {
dir.setLockFactory(NoLockFactory.getNoLockFactory());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler()));
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).setSuppressExceptions();
if (initialCommit) {

View File

@ -204,7 +204,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
Directory dir = newDirectory();
ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer())
new MockAnalyzer(random))
.setIndexDeletionPolicy(policy);
MergePolicy mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -221,7 +221,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
// past commits
lastDeleteTime = System.currentTimeMillis();
conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setOpenMode(
new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -303,7 +303,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
policy.dir = dir;
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setIndexDeletionPolicy(policy).setMaxBufferedDocs(10)
.setMergeScheduler(new SerialMergeScheduler());
MergePolicy mp = conf.getMergePolicy();
@ -324,7 +324,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
}
if (!isOptimized) {
conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setOpenMode(
new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -373,7 +373,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
int preCount = dir.listAll().length;
writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT,
new MockAnalyzer()).setOpenMode(
new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy));
writer.close();
int postCount = dir.listAll().length;
@ -397,7 +397,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
IndexWriter writer = new IndexWriter(
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setIndexDeletionPolicy(policy).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(10))
@ -419,7 +419,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertTrue(lastCommit != null);
// Now add 1 doc and optimize
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(policy));
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
addDoc(writer);
assertEquals(11, writer.numDocs());
writer.optimize();
@ -428,7 +428,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(6, IndexReader.listCommits(dir).size());
// Now open writer on the commit just before optimize:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs());
@ -441,7 +441,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
assertEquals(11, r.numDocs());
r.close();
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs());
// Commits the rollback:
@ -458,7 +458,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
r.close();
// Reoptimize
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(policy));
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
writer.optimize();
writer.close();
@ -469,7 +469,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
// Now open writer on the commit just before optimize,
// but this time keeping only the last commit:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexCommit(lastCommit));
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs());
// Reader still sees optimized index, because writer
@ -505,7 +505,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
MergePolicy mp = conf.getMergePolicy();
@ -518,7 +518,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
}
writer.close();
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -558,7 +558,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
for(int j=0;j<N+1;j++) {
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
MergePolicy mp = conf.getMergePolicy();
@ -618,7 +618,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy).setMergePolicy(newLogMergePolicy());
MergePolicy mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -634,7 +634,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
System.out.println("\nTEST: cycle i=" + i);
}
conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -662,7 +662,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
reader.close();
searcher.close();
}
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy);
mp = conf.getMergePolicy();
if (mp instanceof LogMergePolicy) {
@ -741,7 +741,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
MergePolicy mp = conf.getMergePolicy();
@ -756,7 +756,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
for(int i=0;i<N+1;i++) {
conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy)
.setMaxBufferedDocs(10);
mp = conf.getMergePolicy();
@ -780,7 +780,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
searcher.close();
writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer())
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy));
// This will not commit: there are no changes
// pending because we opened for "create":
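
Every TestDeletionPolicy hunk threads the same two settings through the config: the custom IndexDeletionPolicy under test and an open mode. A sketch of the recurring wiring, where policy is the custom deletion policy instance:

IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
    .setOpenMode(OpenMode.APPEND)
    .setIndexDeletionPolicy(policy); // commits are only deleted when the policy allows it
IndexWriter writer = new IndexWriter(dir, conf);
writer.close();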

Some files were not shown because too many files have changed in this diff.