catch up with trunk and move DocValues to the PerDoc Codec API

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/docvalues@1098566 13f79535-47bb-0310-9956-ffa450edef68
Simon Willnauer 2011-05-02 13:50:57 +00:00
commit a54567963a
659 changed files with 70573 additions and 9486 deletions

View File

@ -19,7 +19,7 @@
<project name="lucene-solr" default="test" basedir=".">
<import file="common-build.xml"/>
<target name="test" description="Test both Lucene and Solr">
<target name="test" description="Test both Lucene and Solr" depends="validate">
<sequential>
<subant target="test" inheritall="false" failonerror="true">
<fileset dir="lucene" includes="build.xml" />
@ -35,7 +35,7 @@
<fileset dir="solr" includes="build.xml" />
</subant></sequential>
</target>
<target name="compile" depends="validate" description="Compile Lucene and Solr">
<target name="compile" description="Compile Lucene and Solr">
<sequential>
<subant target="compile" inheritall="false" failonerror="true">

View File

@ -57,6 +57,7 @@
<classpathentry kind="src" path="solr/src/webapp/src"/>
<classpathentry kind="src" path="solr/src/common"/>
<classpathentry kind="src" path="solr/src/solrj"/>
<classpathentry kind="src" path="solr/src/test-framework"/>
<classpathentry kind="src" path="solr/src/test"/>
<classpathentry kind="src" path="solr/src/test-files"/>
<classpathentry kind="src" path="solr/contrib/analysis-extras/src/java"/>

View File

@ -9,10 +9,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -27,5 +23,9 @@
</orderEntry>
<orderEntry type="library" name="Ant" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,20 +8,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
<library>
<CLASSES>
<root url="file://$MODULE_DIR$/lib" />
</CLASSES>
<JAVADOC />
<SOURCES />
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,11 +8,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,10 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -24,5 +20,9 @@
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,11 +8,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,10 +8,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -22,5 +20,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -9,11 +9,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,12 +8,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,13 +10,13 @@
<sourceFolder url="file://$MODULE_DIR$/src/demo/WebContent" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="library" name="Servlet API 2.4" level="project" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,9 +10,9 @@
<sourceFolder url="file://$MODULE_DIR$/src/java/tools" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,8 +10,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -25,5 +23,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -8,8 +8,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -23,5 +21,7 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -9,10 +9,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -9,10 +9,10 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/resources" isTestSource="false" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,13 +10,6 @@
<excludeFolder url="file://$MODULE_DIR$/temp" />
<excludeFolder url="file://$MODULE_DIR$/work" />
</content>
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -30,5 +23,12 @@
</library>
</orderEntry>
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,12 +10,6 @@
<excludeFolder url="file://$MODULE_DIR$/build" />
<excludeFolder url="file://$MODULE_DIR$/lucene-libs" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="smartcn" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="stempel" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -30,5 +24,11 @@
</orderEntry>
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="smartcn" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="stempel" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -10,16 +10,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/test/java" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
@ -35,5 +25,15 @@
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="library" scope="TEST" name="JUnit" level="project" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="memory" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="common" />
</component>
</module>

View File

@ -11,12 +11,12 @@
<sourceFolder url="file://$MODULE_DIR$/src/test/resources" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/target" />
</content>
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
</component>
</module>

View File

@ -9,14 +9,14 @@
<sourceFolder url="file://$MODULE_DIR$/test/java" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/test/resources" isTestSource="true" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="dataimporthandler" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr DIH library" level="project" />
<orderEntry type="library" name="Solr extraction library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="dataimporthandler" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@ -10,11 +10,11 @@
<sourceFolder url="file://$MODULE_DIR$/src/main/java" isTestSource="false" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr extraction library" level="project" />
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@ -11,8 +11,6 @@
<sourceFolder url="file://$MODULE_DIR$/src/main/resources" isTestSource="false" />
<excludeFolder url="file://$MODULE_DIR$/build" />
</content>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
@ -26,5 +24,7 @@
<jarDirectory url="file://$MODULE_DIR$/lib" recursive="false" />
</library>
</orderEntry>
<orderEntry type="module" module-name="solr" />
<orderEntry type="module" module-name="lucene" scope="TEST" />
</component>
</module>

View File

@ -12,23 +12,24 @@
<sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/test-files" isTestSource="true" />
<sourceFolder url="file://$MODULE_DIR$/src/test-framework" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/build" />
<excludeFolder url="file://$MODULE_DIR$/dist" />
<excludeFolder url="file://$MODULE_DIR$/lucene-libs" />
<excludeFolder url="file://$MODULE_DIR$/package" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr example library" level="project" />
<orderEntry type="module" module-name="spatial" />
<orderEntry type="module" module-name="highlighter" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="icu" />
<orderEntry type="module" module-name="queries" />
<orderEntry type="module" module-name="misc" />
<orderEntry type="module" module-name="phonetic" />
<orderEntry type="module" module-name="spellchecker" />
<orderEntry type="module" module-name="common" />
<orderEntry type="module" module-name="lucene" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="Solr library" level="project" />
<orderEntry type="library" name="Solr example library" level="project" />
</component>
</module>

View File

@ -132,6 +132,11 @@
<artifactId>lucene-icu4j</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>r05</version>
</dependency>
<dependency>
<groupId>com.sleepycat</groupId>
<artifactId>berkeleydb</artifactId>

View File

@ -44,9 +44,8 @@
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
@ -85,6 +84,11 @@
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<directory>${build-directory}</directory>
@ -94,7 +98,7 @@
<testSourceDirectory>src/test</testSourceDirectory>
<testResources>
<testResource>
<directory>test-files</directory>
<directory>src/test-files</directory>
</testResource>
<testResource>
<directory>../../src/test-files</directory>

View File

@ -44,9 +44,8 @@
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>

View File

@ -44,9 +44,8 @@
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>

View File

@ -43,10 +43,9 @@
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<groupId>org.apache.solr</groupId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>

View File

@ -47,9 +47,8 @@
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>

View File

@ -44,9 +44,8 @@
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<artifactId>solr-test-framework</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>

View File

@ -35,6 +35,7 @@
<module>src</module>
<module>src/solrj</module>
<module>src/webapp</module>
<module>src/test-framework</module>
<module>contrib</module>
</modules>
<properties>

View File

@ -156,6 +156,11 @@
<artifactId>servlet-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@ -197,17 +202,6 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
@ -248,6 +242,24 @@
</programs>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-test-source</id>
<phase>generate-test-sources</phase>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>test-framework</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -42,12 +42,6 @@
<artifactId>lucene-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-test-framework</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analyzers-common</artifactId>
@ -88,11 +82,6 @@
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<directory>${build-directory}</directory>

View File

@ -0,0 +1,76 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.solr</groupId>
<artifactId>solr-parent</artifactId>
<version>@version@</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<groupId>org.apache.solr</groupId>
<artifactId>solr-test-framework</artifactId>
<packaging>jar</packaging>
<name>Apache Solr Test Framework</name>
<description>Apache Solr Test Framework</description>
<properties>
<module-directory>solr/src/test-framework</module-directory>
<build-directory>../../build</build-directory>
</properties>
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>solr-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-test-framework</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
</dependencies>
<build>
<directory>${build-directory}</directory>
<outputDirectory>${build-directory}/classes/test-framework</outputDirectory>
<sourceDirectory>.</sourceDirectory>
<resources>
<resource>
<directory>.</directory>
<excludes>
<exclude>**/*.java</exclude>
</excludes>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,5 +0,0 @@
build
dist
*~
velocity.log
build.properties

View File

@ -141,6 +141,11 @@ Changes in backwards compatibility policy
* LUCENE-2315: AttributeSource's methods for accessing attributes are now final,
else it's easy to corrupt the internal states. (Uwe Schindler)
* LUCENE-2814: The IndexWriter.flush method no longer takes "boolean
flushDocStores" argument, as we now always flush doc stores (index
files holding stored fields and term vectors) while flushing a
segment. (Mike McCandless)
Changes in Runtime Behavior
* LUCENE-2846: omitNorms now behaves like omitTermFrequencyAndPositions, if you
@ -157,11 +162,10 @@ Changes in Runtime Behavior
* LUCENE-2720: IndexWriter throws IndexFormatTooOldException on open, rather
than later when e.g. a merge starts. (Shai Erera, Mike McCandless, Uwe Schindler)
* LUCENE-1076: The default merge policy is now able to merge
non-contiguous segments, which means docIDs no longer necessarily
say "in order". If this is a problem then you can use either of the
LogMergePolicy impls, and call setRequireContiguousMerge(true).
(Mike McCandless)
* LUCENE-1076: The default merge policy (TieredMergePolicy) is now
able to merge non-contiguous segments, which means docIDs no longer
necessarily stay "in order". If this is a problem then you can use
either of the LogMergePolicy impls. (Mike McCandless)
* LUCENE-2881: FieldInfos is now tracked per segment. Before it was tracked
per IndexWriter session, which resulted in FieldInfos that had the FieldInfo
@ -170,6 +174,70 @@ Changes in Runtime Behavior
successful commit. The corresponding file format changes are backwards-
compatible. (Michael Busch, Simon Willnauer)
* LUCENE-2956, LUCENE-2573, LUCENE-2324, LUCENE-2555: Changes from
DocumentsWriterPerThread:
- IndexWriter now uses a DocumentsWriter per thread when indexing documents.
Each DocumentsWriterPerThread indexes documents in its own private segment,
and the in memory segments are no longer merged on flush. Instead, each
segment is separately flushed to disk and subsequently merged with normal
segment merging.
- DocumentsWriterPerThread (DWPT) is now flushed concurrently based on a
FlushPolicy. When a DWPT is flushed, a fresh DWPT is swapped in so that
indexing may continue concurrently with flushing. The selected
DWPT flushes all its RAM resident documents to disk. Note: Segment flushes
don't flush all RAM resident documents but only the documents private to
the DWPT selected for flushing.
- Flushing is now controlled by FlushPolicy that is called for every add,
update or delete on IndexWriter. By default DWPTs are flushed either on
maxBufferedDocs per DWPT or on the globally used active memory. Once the active
memory exceeds ramBufferSizeMB only the largest DWPT is selected for
flushing and the memory used by this DWPT is subtracted from the active
memory and added to a flushing memory pool, which can lead to temporarily
higher memory usage due to ongoing indexing.
- IndexWriter can now utilize a ramBufferSize > 2048 MB. Each DWPT can address
up to 2048 MB of memory, so the ramBufferSize is now bounded by the max
number of DWPTs available in the used DocumentsWriterPerThreadPool.
IndexWriter's net memory consumption can grow far beyond the 2048 MB limit if
the application can use all available DWPTs. To prevent a DWPT from
exhausting its address space, IndexWriter will forcefully flush a DWPT if its
hard memory limit is exceeded. The RAMPerThreadHardLimitMB can be controlled
via IndexWriterConfig and defaults to 1945 MB (see the sketch after this list).
Since IndexWriter flushes DWPTs concurrently, not all memory is released
immediately. Applications should still use a ramBufferSize significantly
lower than the JVM's available heap memory, since under high load multiple
flushing DWPTs can consume substantial transient memory when IO performance
is slow relative to indexing rate.
- IndexWriter#commit now doesn't block concurrent indexing while flushing all
'currently' RAM resident documents to disk. Yet, flushes that occur while
a full flush is running are queued and will happen after all DWPTs involved
in the full flush are done flushing. Applications using multiple threads
during indexing that trigger a full flush (e.g. call commit() or open a new
NRT reader) can use significantly more transient memory.
- IndexWriter#addDocument and IndexWriter.updateDocument can block indexing
threads if the number of active plus flushing DWPTs exceeds a
safety limit. By default this happens if twice the max number of available thread
states (DWPTPool) is exceeded. This safety limit prevents applications from
exhausting their available memory if flushing can't keep up with
concurrently indexing threads.
- IndexWriter only applies and flushes deletes if the maxBufferedDelTerms
limit is reached during indexing. No segment flushes will be triggered
due to this setting.
- IndexWriter#flush(boolean, boolean) doesn't synchronize on IndexWriter
anymore. A dedicated flushLock has been introduced to prevent multiple full
flushes from happening concurrently.
- DocumentsWriter doesn't write shared doc stores anymore.
(Mike McCandless, Michael Busch, Simon Willnauer)
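The new knobs surface on IndexWriterConfig. Below is a minimal sketch of wiring them up, assuming the trunk API described above; the directory path and analyzer are placeholders, and setRAMPerThreadHardLimitMB is the setter implied by the RAMPerThreadHardLimitMB setting named in the entry.
import java.io.File;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
// assumption: 'analyzer' is any Analyzer instance; the index path is a placeholder
Directory dir = FSDirectory.open(new File("/path/to/index"));
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
conf.setRAMBufferSizeMB(256.0);        // global active-memory flush trigger across all DWPTs
conf.setRAMPerThreadHardLimitMB(1945); // per-DWPT hard limit (the default noted above)
IndexWriter writer = new IndexWriter(dir, conf);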
API Changes
* LUCENE-2302, LUCENE-1458, LUCENE-2111, LUCENE-2514: Terms are no longer
@ -334,6 +402,16 @@ New features
* LUCENE-2862: Added TermsEnum.totalTermFreq() and
Terms.getSumTotalTermFreq(). (Mike McCandless, Robert Muir)
* LUCENE-3001: Added TrieFieldHelper to write solr compatible numeric
fields without the solr dependency. (ryan)
* LUCENE-3003: Added new expert class oal.index.DocTermsOrd,
refactored from Solr's UnInvertedField, for accessing term ords for
multi-valued fields, per document. This is similar to FieldCache in
that it inverts the index to compute the ords, but differs in that
it's able to handle multi-valued fields and does not hold the term
bytes in RAM. (Mike McCandless)
Optimizations
* LUCENE-2588: Don't store unnecessary suffixes when writing the terms
@ -366,6 +444,21 @@ Bug fixes
* LUCENE-2936: PhraseQuery score explanations were not correctly
identifying matches vs non-matches. (hossman)
* LUCENE-2996: addIndexes(IndexReader) did not flush before adding the new
indexes, causing existing deletions to be applied on the incoming indexes as
well. (Shai Erera, Mike McCandless)
Test Cases
* LUCENE-3002: added 'tests.iter.min' to control 'tests.iter' by allowing the
iterations to stop if at least 'tests.iter.min' ran and a failure occurred.
(Shai Erera, Chris Hostetter)
Build
* LUCENE-3006: Building javadocs will fail on warnings by default.
Override with -Dfailonjavadocwarning=false (sarowe, gsingers)
======================= Lucene 3.x (not yet released) =======================
Changes in backwards compatibility policy
@ -380,7 +473,21 @@ Optimizations
* LUCENE-2990: ArrayUtil/CollectionUtil.*Sort() methods now exit early
on empty or one-element lists/arrays. (Uwe Schindler)
======================= Lucene 3.1 (not yet released) =======================
Bug fixes
* LUCENE-3024: Index with more than 2.1B terms was hitting AIOOBE when
seeking TermEnum (e.g. used by Solr's faceting) (Tom Burton-West, Mike
McCandless)
* LUCENE-3042: When a filter or consumer added Attributes to a TokenStream
chain after it was already (partly) consumed [or clearAttributes(),
captureState(), cloneAttributes(),... was called by the Tokenizer],
the Tokenizer calling clearAttributes() or capturing state after addition
may not do this on the newly added Attribute. This bug affected only
very special use cases of the TokenStream-API, most users would not
have recognized it. (Uwe Schindler, Robert Muir)
======================= Lucene 3.1.0 =======================
Changes in backwards compatibility policy
@ -676,6 +783,9 @@ API Changes
for AttributeImpls, but can still be provided (if needed).
(Uwe Schindler)
* LUCENE-2691: Deprecate IndexWriter.getReader in favor of
IndexReader.open(IndexWriter) (Grant Ingersoll, Mike McCandless)
* LUCENE-2876: Deprecated Scorer.getSimilarity(). If your Scorer uses a Similarity,
it should keep it itself. Fixed Scorers to pass their parent Weight, so that
Scorer.visitSubScorers (LUCENE-2590) will work correctly.
@ -775,6 +885,9 @@ Bug fixes
been rounded down to 0 instead of being rounded up to the smallest
positive number. (yonik)
* LUCENE-2936: PhraseQuery score explanations were not correctly
identifying matches vs non-matches. (hossman)
* LUCENE-2975: A hotspot bug corrupts IndexInput#readVInt()/readVLong() if
the underlying readByte() is inlined (which happens e.g. in MMapDirectory).
The loop was unwinded which makes the hotspot bug disappear.
@ -895,6 +1008,9 @@ New features
FieldInvertState so that it can be used in Similarity.computeNorm.
(Robert Muir)
* LUCENE-2720: Segments now record the code version which created them.
(Shai Erera, Mike McCandless, Uwe Schindler)
* LUCENE-2474: Added expert ReaderFinishedListener API to
IndexReader, to allow apps that maintain external per-segment caches
to evict entries when a segment is finished. (Shay Banon, Yonik
@ -1042,8 +1158,8 @@ Build
generating Maven artifacts (Steven Rowe)
* LUCENE-2609: Added jar-test-framework Ant target which packages Lucene's
tests' framework classes. (Drew Farris, Grant Ingersoll, Shai Erera, Steven
Rowe)
tests' framework classes. (Drew Farris, Grant Ingersoll, Shai Erera,
Steven Rowe)
Test Cases

View File

@ -356,3 +356,9 @@ LUCENE-1458, LUCENE-2111: Flexible Indexing
field as a parameter, this is removed due to the fact the entire Similarity (all methods)
can now be configured per-field.
Methods that apply to the entire query such as coord() and queryNorm() exist in SimilarityProvider.
* LUCENE-1076: TieredMergePolicy is now the default merge policy.
It's able to merge non-contiguous segments; this may cause problems
for applications that rely on Lucene's internal document ID
assignment. If so, you should instead use LogByteSize/DocMergePolicy
during indexing.
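A minimal sketch of opting back into contiguous merges, assuming an existing Directory 'dir' and Analyzer 'analyzer':
IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
conf.setMergePolicy(new LogByteSizeMergePolicy()); // contiguous merges keep internal docIDs in order
IndexWriter writer = new IndexWriter(dir, conf);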

View File

@ -152,6 +152,7 @@
<echo>DEPRECATED - Doing Nothing. See http://wiki.apache.org/lucene-java/HowToUpdateTheWebsite</echo>
</target>
<target name="javadoc" depends="javadocs"/>
<target name="javadocs" description="Generate javadoc"
depends="javadocs-all, javadocs-core, javadocs-contrib, javadocs-test-framework">
<echo file="${javadoc.dir}/index.html" append="false">
@ -194,6 +195,17 @@
<target name="javadocs-all" description="Generate javadoc for core and contrib classes" depends="build-contrib">
<sequential>
<mkdir dir="${javadoc.dir}/all"/>
<path id="javadoc.classpath">
<path refid="classpath"/>
<pathelement location="${ant.home}/lib/ant.jar"/>
<fileset dir=".">
<exclude name="build/**/*.jar"/>
<include name="**/lib/*.jar"/>
</fileset>
<pathelement location="${common.dir}/../modules/analysis/build/common/lucene-analyzers-common-${version}.jar"/>
</path>
<invoke-javadoc
destdir="${javadoc.dir}/all">
<sources>
@ -424,9 +436,12 @@
</scp>
</target>
<target name="stage" depends="dist-all, generate-maven-artifacts, sign-artifacts, copy-to-stage">
<target name="prepare-release" depends="clean, dist-all, generate-maven-artifacts, sign-artifacts"/>
<target name="stage" depends="prepare-release, copy-to-stage">
</target>
<target name="generate-maven-artifacts"
depends="maven.ant.tasks-check, package, jar-src, jar-test-framework-src, javadocs">
<sequential>

View File

@ -78,6 +78,7 @@
<property name="tests.directory" value="random" />
<property name="tests.linedocsfile" value="europarl.lines.txt.gz" />
<property name="tests.iter" value="1" />
<property name="tests.iter.min" value="${tests.iter}" />
<property name="tests.seed" value="random" />
<property name="tests.loggingfile" value="/dev/null"/>
<property name="tests.nightly" value="false" />
@ -102,6 +103,7 @@
<path refid="classpath"/>
<pathelement location="${ant.home}/lib/ant.jar"/>
<fileset dir=".">
<exclude name="build/**/*.jar"/>
<include name="**/lib/*.jar"/>
</fileset>
</path>
@ -306,7 +308,7 @@
</copy>
</target>
<target name="compile" depends="compile-core, validate-lucene">
<target name="compile" depends="compile-core">
<!-- convenience target to compile core -->
</target>
@ -507,6 +509,8 @@
<sysproperty key="tests.linedocsfile" value="${tests.linedocsfile}"/>
<!-- set the number of times tests should run -->
<sysproperty key="tests.iter" value="${tests.iter}"/>
<!-- set the minimum number of times tests should run unless failure -->
<sysproperty key="tests.iter.min" value="${tests.iter.min}"/>
<!-- set the test seed -->
<sysproperty key="tests.seed" value="${tests.seed}"/>
<!-- set the Version that tests should run against -->
@ -561,7 +565,7 @@
</sequential>
</macrodef>
<target name="test" depends="compile-test,junit-mkdir,junit-sequential,junit-parallel" description="Runs unit tests"/>
<target name="test" depends="compile-test,validate-lucene,junit-mkdir,junit-sequential,junit-parallel" description="Runs unit tests"/>
<target name="junit-mkdir">
<mkdir dir="${junit.output.dir}"/>
@ -760,6 +764,7 @@
</sequential>
</macrodef>
<property name="failonjavadocwarning" value="true"/>
<macrodef name="invoke-javadoc">
<element name="sources" optional="yes"/>
<attribute name="destdir"/>
@ -769,6 +774,7 @@
<copy todir="@{destdir}/../prettify" overwrite="false">
<fileset dir="${prettify.dir}"/>
</copy>
<record name="@{destdir}/log_javadoc.txt" action="start" append="no"/>
<javadoc
overview="@{overview}"
packagenames="org.apache.lucene.*"
@ -780,6 +786,7 @@
author="true"
version="true"
use="true"
failonerror="true"
source="${ant.java.version}"
link="${javadoc.link}"
windowtitle="${Name} ${version} API"
@ -801,6 +808,26 @@
<classpath refid="javadoc.classpath"/>
</javadoc>
<record name="@{destdir}/log_javadoc.txt" action="stop"/>
<delete>
<fileset file="@{destdir}/log_javadoc.txt">
<not>
<containsregexp expression="\[javadoc\]\s*[1-9][0-9]*[\s]*warning"/>
</not>
</fileset>
</delete>
<fail message="Javadocs warnings were found!">
<condition>
<and>
<available file="@{destdir}/log_javadoc.txt"/>
<istrue value="${failonjavadocwarning}"/>
</and>
</condition>
</fail>
</sequential>
</macrodef>

View File

@ -4,20 +4,13 @@ Lucene contrib change Log
Build
* LUCENE-2413: Moved the demo out of lucene core and into contrib/demo.
(Robert Muir)
* LUCENE-2845: Moved contrib/benchmark to modules.
New Features
* LUCENE-2604: Added RegexpQuery support to contrib/queryparser.
(Simon Willnauer, Robert Muir)
* LUCENE-2500: Added DirectIOLinuxDirectory, a Linux-specific
Directory impl that uses the O_DIRECT flag to bypass the buffer
cache. This is useful to prevent segment merging from evicting
pages from the buffer cache, since fadvise/madvise do not seem to work.
(Michael McCandless)
* LUCENE-2373: Added a Codec implementation that works with append-only
filesystems (such as e.g. Hadoop DFS). SegmentInfos writing/reading
code is refactored to support append-only FS, and to allow for future
@ -32,10 +25,8 @@ New Features
* LUCENE-2507: Added DirectSpellChecker, which retrieves correction candidates directly
from the term dictionary using levenshtein automata. (Robert Muir)
* LUCENE-2791: Added WindowsDirectory, a Windows-specific Directory impl
that doesn't synchronize on the file handle. This can be useful to
avoid the performance problems of SimpleFSDirectory and NIOFSDirectory.
(Robert Muir, Simon Willnauer, Uwe Schindler, Michael McCandless)
* LUCENE-2836: Add FieldCacheRewriteMethod, which rewrites MultiTermQueries
using the FieldCache's TermsEnum. (Robert Muir)
API Changes
@ -52,11 +43,38 @@ API Changes
for different fields, this way all parameters (such as TF factors) can be
customized on a per-field basis. (Robert Muir)
Bug Fixes
* LUCENE-3045: fixed QueryNodeImpl.containsTag(String key) that was
not lowercasing the key before checking for the tag (Adriano Crestani)
======================= Lucene 3.x (not yet released) =======================
(No changes)
Bug Fixes
======================= Lucene 3.1 (not yet released) =======================
* LUCENE-3045: fixed QueryNodeImpl.containsTag(String key) that was
not lowercasing the key before checking for the tag (Adriano Crestani)
* LUCENE-3026: SmartChineseAnalyzer's WordTokenFilter threw NullPointerException
on sentences longer than 32,767 characters. (wangzhenghang via Robert Muir)
* LUCENE-2939: Highlighter should try and use maxDocCharsToAnalyze in
WeightedSpanTermExtractor when adding a new field to MemoryIndex as well as
when using CachingTokenStream. This can be a significant performance bug for
large documents. (Mark Miller)
* LUCENE-3043: GermanStemmer threw IndexOutOfBoundsException if it encountered
a zero-length token. (Robert Muir)
* LUCENE-3044: ThaiWordFilter didn't reset its cached state correctly, this only
caused a problem if you consumed a tokenstream, then reused it, added different
attributes to it, and consumed it again. (Robert Muir, Uwe Schindler)
New Features
* LUCENE-3016: Add analyzer for Latvian. (Robert Muir)
======================= Lucene 3.1.0 =======================
Changes in backwards compatibility policy
@ -83,6 +101,14 @@ Changes in backwards compatibility policy
* LUCENE-2581: Added new methods to FragmentsBuilder interface. These methods
are used to set pre/post tags and Encoder. (Koji Sekiguchi)
* LUCENE-2391: Improved spellchecker (re)build time/ram usage by omitting
frequencies/positions/norms for single-valued fields, modifying the default
ramBufferMBSize to match IndexWriterConfig (16MB), making index optimization
an optional boolean parameter, and modifying the incremental update logic
to work well with unoptimized spellcheck indexes. The indexDictionary() methods
were made final to ensure a hard backwards break in case you were subclassing
Spellchecker. In general, subclassing Spellchecker is not recommended. (Robert Muir)
Changes in runtime behavior
* LUCENE-2117: SnowballAnalyzer uses TurkishLowerCaseFilter instead of
@ -96,6 +122,11 @@ Changes in runtime behavior
Bug fixes
* LUCENE-2855: contrib queryparser was using CharSequence as key in some internal
Map instances, which was leading to incorrect behavior, since some CharSequence
implementors do not override hashcode and equals methods. Now the internal Maps
are using String instead. (Adriano Crestani)
* LUCENE-2068: Fixed ReverseStringFilter which was not aware of supplementary
characters. During reverse the filter created unpaired surrogates, which
will be replaced by U+FFFD by the indexer, but not at query time. The filter
@ -122,35 +153,42 @@ Bug fixes
For matchVersion >= 3.1 the filter also no longer lowercases. ThaiAnalyzer
will use a separate LowerCaseFilter instead. (Uwe Schindler, Robert Muir)
* LUCENE-2615: Fix DirectIOLinuxDirectory to not assign bogus
permissions to newly created files, and to not silently hardwire
buffer size to 1 MB. (Mark Miller, Robert Muir, Mike McCandless)
* LUCENE-2629: Fix gennorm2 task for generating ICUFoldingFilter's .nrm file. This allows
you to customize its normalization/folding, by editing the source data files in src/data
and regenerating a new .nrm with 'ant gennorm2'. (David Bowen via Robert Muir)
* LUCENE-2653: ThaiWordFilter depends on the JRE having a Thai dictionary, which is not
always the case. If the dictionary is unavailable, the filter will now throw
UnsupportedOperationException in the constructor. (Robert Muir)
* LUCENE-589: Fix contrib/demo for international documents.
(Curtis d'Entremont via Robert Muir)
* LUCENE-2246: Fix contrib/demo for Turkish html documents.
(Selim Nadi via Robert Muir)
* LUCENE-590: Demo HTML parser gives incorrect summaries when title is repeated as a heading
(Curtis d'Entremont via Robert Muir)
* LUCENE-591: The demo indexer now indexes meta keywords.
(Curtis d'Entremont via Robert Muir)
* LUCENE-2874: Highlighting overlapping tokens outputted doubled words.
(Pierre Gossé via Robert Muir)
* LUCENE-2943: Fix thread-safety issues with ICUCollationKeyFilter.
(Robert Muir)
API Changes
* LUCENE-2867: Some contrib queryparser methods that receives CharSequence as
identifier, such as QueryNode#unsetTag(CharSequence), were deprecated and
will be removed on version 4. (Adriano Crestani)
* LUCENE-2147: Spatial GeoHashUtils now always decodes GeoHash strings
with full precision. GeoHash#decode_exactly(String) was merged into
GeoHash#decode(String). (Chris Male, Simon Willnauer)
@ -186,11 +224,20 @@ API Changes
* LUCENE-2747: Deprecated ArabicLetterTokenizer. StandardTokenizer now tokenizes
most languages correctly including Arabic. (Steven Rowe, Robert Muir)
* LUCENE-2830: Use StringBuilder instead of StringBuffer across Benchmark, and
remove the StringBuffer HtmlParser.parse() variant. (Shai Erera)
* LUCENE-2920: Deprecated ShingleMatrixFilter as it is unmaintained and does
not work with custom Attributes or custom payload encoders. (Uwe Schindler)
New features
* LUCENE-2500: Added DirectIOLinuxDirectory, a Linux-specific
Directory impl that uses the O_DIRECT flag to bypass the buffer
cache. This is useful to prevent segment merging from evicting
pages from the buffer cache, since fadvise/madvise do not seem to work.
(Michael McCandless)
* LUCENE-2306: Add NumericRangeFilter and NumericRangeQuery support to XMLQueryParser.
(Jingkei Ly, via Mark Harwood)
@ -280,6 +327,17 @@ New features
BooleanModifiersQueryNodeProcessor, for example instead of GroupQueryNodeProcessor.
(Adriano Crestani via Robert Muir)
* LUCENE-2791: Added WindowsDirectory, a Windows-specific Directory impl
that doesn't synchronize on the file handle. This can be useful to
avoid the performance problems of SimpleFSDirectory and NIOFSDirectory.
(Robert Muir, Simon Willnauer, Uwe Schindler, Michael McCandless)
* LUCENE-2842: Add analyzer for Galician. Also adds the RSLP (Orengo) stemmer
for Portuguese. (Robert Muir)
* SOLR-1057: Add PathHierarchyTokenizer that represents file path hierarchies as synonyms of
/something, /something/something, /something/something/else. (Ryan McKinley, Koji Sekiguchi)
Build
* LUCENE-2124: Moved the JDK-based collation support from contrib/collation
@ -300,6 +358,11 @@ Build
* LUCENE-2797: Upgrade contrib/icu's ICU jar file to ICU 4.6
(Robert Muir)
* LUCENE-2833: Upgrade contrib/ant's jtidy jar file to r938 (Robert Muir)
* LUCENE-2413: Moved the demo out of lucene core and into contrib/demo.
(Robert Muir)
Optimizations
* LUCENE-2157: DelimitedPayloadTokenFilter no longer copies the buffer

View File

@ -39,7 +39,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
@ -285,9 +285,9 @@ public class IndexTask extends Task {
IndexWriterConfig conf = new IndexWriterConfig(
Version.LUCENE_CURRENT, analyzer).setOpenMode(
create ? OpenMode.CREATE : OpenMode.APPEND);
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setUseCompoundFile(useCompoundIndex);
lmp.setMergeFactor(mergeFactor);
TieredMergePolicy tmp = (TieredMergePolicy) conf.getMergePolicy();
tmp.setUseCompoundFile(useCompoundIndex);
tmp.setMaxMergeAtOnce(mergeFactor);
IndexWriter writer = new IndexWriter(dir, conf);
int totalFiles = 0;
int totalIndexed = 0;

View File

@ -0,0 +1,2 @@
AnyObjectId[9a9ff077cdd36a96e7e0506986edd4e52b90a22f] was removed in git history.
Apache SVN contains full history.

View File

@ -0,0 +1 @@
No bdb jars are shipped with lucene. This is a fake license to work around the automated license checking.

View File

@ -0,0 +1 @@
No bdb jars are shipped with lucene. This is a fake license to work around the automated license checking.

View File

@ -0,0 +1 @@
No bdb jars are shipped with lucene. This is a fake license to work around the automated license checking.

View File

@ -0,0 +1,2 @@
AnyObjectId[99baf20bacd712cae91dd6e4e1f46224cafa1a37] was removed in git history.
Apache SVN contains full history.

View File

@ -0,0 +1 @@
No bdb jars are shipped with lucene. This is a fake license to work around the automated license checking.

View File

@ -22,16 +22,17 @@ import java.io.File;
import java.io.PrintStream;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
public class TestDemo extends LuceneTestCase {
private void testOneSearch(String query, int expectedHitCount) throws Exception {
private void testOneSearch(File indexPath, String query, int expectedHitCount) throws Exception {
PrintStream outSave = System.out;
try {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
PrintStream fakeSystemOut = new PrintStream(bytes);
System.setOut(fakeSystemOut);
SearchFiles.main(new String[] {"-query", query});
SearchFiles.main(new String[] {"-query", query, "-index", indexPath.getPath()});
fakeSystemOut.flush();
String output = bytes.toString(); // intentionally use default encoding
assertTrue("output=" + output, output.contains(expectedHitCount + " total matching documents"));
@ -42,12 +43,13 @@ public class TestDemo extends LuceneTestCase {
public void testIndexSearch() throws Exception {
File dir = getDataFile("test-files/docs");
IndexFiles.main(new String[] { "-create", "-docs", dir.getPath() });
testOneSearch("apache", 3);
testOneSearch("patent", 8);
testOneSearch("lucene", 0);
testOneSearch("gnu", 6);
testOneSearch("derivative", 8);
testOneSearch("license", 13);
File indexDir = _TestUtil.getTempDir("ContribDemoTest");
IndexFiles.main(new String[] { "-create", "-docs", dir.getPath(), "-index", indexDir.getPath()});
testOneSearch(indexDir, "apache", 3);
testOneSearch(indexDir, "patent", 8);
testOneSearch(indexDir, "lucene", 0);
testOneSearch(indexDir, "gnu", 6);
testOneSearch(indexDir, "derivative", 8);
testOneSearch(indexDir, "license", 13);
}
}
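For reference, the invocation contract the updated test exercises, as a hedged sketch (both paths are placeholders):
// The demo's main() methods now take an explicit -index argument rather than
// writing to a hard-coded location. Placeholder paths below.
IndexFiles.main(new String[] { "-create", "-docs", "/path/to/docs",
                               "-index", "/path/to/index" });
SearchFiles.main(new String[] { "-query", "apache",
                                "-index", "/path/to/index" });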

View File

@ -1,2 +0,0 @@
build
dist

View File

@ -197,6 +197,11 @@ public class Highlighter
tokenStream.reset();
TextFragment currentFrag = new TextFragment(newText,newText.length(), docFrags.size());
if (fragmentScorer instanceof QueryScorer) {
((QueryScorer) fragmentScorer).setMaxDocCharsToAnalyze(maxDocCharsToAnalyze);
}
TokenStream newStream = fragmentScorer.init(tokenStream);
if(newStream != null) {
tokenStream = newStream;
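A sketch of how the analysis cap now flows end to end, assuming the usual Highlighter setup ("query", "analyzer" and "text" are placeholders):
Highlighter hg = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(query));
hg.setMaxDocCharsToAnalyze(100);
// The fragment loop above now forwards the cap into the QueryScorer before
// init(), so WeightedSpanTermExtractor stops analyzing past 100 chars.
String best = hg.getBestFragment(analyzer, "field", text);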

View File

@ -0,0 +1,57 @@
package org.apache.lucene.search.highlight;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
/**
* This TokenFilter limits the number of tokens emitted while indexing by
* summing the length (end offset minus start offset) of each token it passes
* through, and dropping all further tokens once that running total reaches
* the configured offset limit.
*/
public final class OffsetLimitTokenFilter extends TokenFilter {
private int offsetCount;
private OffsetAttribute offsetAttrib = getAttribute(OffsetAttribute.class);
private int offsetLimit;
public OffsetLimitTokenFilter(TokenStream input, int offsetLimit) {
super(input);
this.offsetLimit = offsetLimit;
}
@Override
public boolean incrementToken() throws IOException {
if (offsetCount < offsetLimit && input.incrementToken()) {
int offsetLength = offsetAttrib.endOffset() - offsetAttrib.startOffset();
offsetCount += offsetLength;
return true;
}
return false;
}
@Override
public void reset() throws IOException {
super.reset();
offsetCount = 0;
}
}
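A usage sketch: the filter accumulates each token's length (endOffset - startOffset) and drops everything after the running total reaches the limit ("analyzer" and "text" are placeholders):
TokenStream ts = analyzer.tokenStream("body", new StringReader(text));
TokenStream limited = new OffsetLimitTokenFilter(ts, 10000);
CharTermAttribute term = limited.addAttribute(CharTermAttribute.class);
limited.reset();
while (limited.incrementToken()) {
  // only tokens from roughly the first 10000 analyzed chars arrive here
  System.out.println(term.toString());
}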

View File

@ -54,6 +54,7 @@ public class QueryScorer implements Scorer {
private IndexReader reader;
private boolean skipInitExtractor;
private boolean wrapToCaching = true;
private int maxCharsToAnalyze;
/**
* @param query Query to use for highlighting
@ -209,7 +210,7 @@ public class QueryScorer implements Scorer {
private TokenStream initExtractor(TokenStream tokenStream) throws IOException {
WeightedSpanTermExtractor qse = defaultField == null ? new WeightedSpanTermExtractor()
: new WeightedSpanTermExtractor(defaultField);
qse.setMaxDocCharsToAnalyze(maxCharsToAnalyze);
qse.setExpandMultiTermQuery(expandMultiTermQuery);
qse.setWrapIfNotCachingTokenFilter(wrapToCaching);
if (reader == null) {
@ -265,4 +266,8 @@ public class QueryScorer implements Scorer {
public void setWrapIfNotCachingTokenFilter(boolean wrap) {
this.wrapToCaching = wrap;
}
public void setMaxDocCharsToAnalyze(int maxDocCharsToAnalyze) {
this.maxCharsToAnalyze = maxDocCharsToAnalyze;
}
}

View File

@ -56,6 +56,7 @@ public class WeightedSpanTermExtractor {
private boolean expandMultiTermQuery;
private boolean cachedTokenStream;
private boolean wrapToCaching = true;
private int maxDocCharsToAnalyze;
public WeightedSpanTermExtractor() {
}
@ -320,13 +321,13 @@ public class WeightedSpanTermExtractor {
private AtomicReaderContext getLeafContextForField(String field) throws IOException {
if(wrapToCaching && !cachedTokenStream && !(tokenStream instanceof CachingTokenFilter)) {
tokenStream = new CachingTokenFilter(tokenStream);
tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
cachedTokenStream = true;
}
AtomicReaderContext context = readers.get(field);
if (context == null) {
MemoryIndex indexer = new MemoryIndex();
indexer.addField(field, tokenStream);
indexer.addField(field, new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
tokenStream.reset();
IndexSearcher searcher = indexer.createSearcher();
// MEM index has only atomic ctx
@ -545,4 +546,8 @@ public class WeightedSpanTermExtractor {
public void setWrapIfNotCachingTokenFilter(boolean wrap) {
this.wrapToCaching = wrap;
}
protected final void setMaxDocCharsToAnalyze(int maxDocCharsToAnalyze) {
this.maxDocCharsToAnalyze = maxDocCharsToAnalyze;
}
}

View File

@ -58,7 +58,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox jumped";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@ -102,7 +102,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox jumped";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@ -172,7 +172,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@ -215,7 +215,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, TEXT, Store.YES, Index.ANALYZED,
@ -256,7 +256,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
final String TEXT = "the fox did not jump";
final Directory directory = newDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
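The mechanical change repeated throughout these tests: MockAnalyzer now takes the test's Random as its first argument, so analysis randomization is reproducible from the test seed. As a before/after sketch (LuceneTestCase's "random" field assumed in scope, "directory" a placeholder):
// before: new MockAnalyzer(MockTokenizer.WHITESPACE, false)
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
IndexWriter w = new IndexWriter(directory,
    newIndexWriterConfig(TEST_VERSION_CURRENT, a));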

View File

@ -90,7 +90,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Directory ramDir;
public IndexSearcher searcher = null;
int numHighlights = 0;
final Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
final Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
TopDocs hits;
String[] texts = {
@ -101,7 +101,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
"wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
public void testQueryScorerHits() throws Exception {
Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
query = qp.parse("\"very long\"");
searcher = new IndexSearcher(ramDir, true);
@ -133,7 +133,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String s1 = "I call our world Flatland, not because we call it so,";
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
// Verify that a query against the default field results in text being
// highlighted
@ -165,7 +165,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
*/
private static String highlightField(Query query, String fieldName, String text)
throws IOException, InvalidTokenOffsetsException {
TokenStream tokenStream = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
// Assuming "<B>", "</B>" used to highlight
SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@ -210,7 +210,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String f2c = f2 + ":";
String q = "(" + f1c + ph1 + " OR " + f2c + ph1 + ") AND (" + f1c + ph2
+ " OR " + f2c + ph2 + ")";
Analyzer analyzer = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, f1, analyzer);
Query query = qp.parse(q);
@ -1134,13 +1134,13 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
sb.append("stoppedtoken");
}
SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
Highlighter hg = getHighlighter(query, "data", new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true).tokenStream(
Highlighter hg = getHighlighter(query, "data", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream(
"data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
// new
// QueryTermScorer(query));
hg.setTextFragmenter(new NullFragmenter());
hg.setMaxDocCharsToAnalyze(100);
match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
.getMaxDocCharsToAnalyze());
@ -1151,7 +1151,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// + whitespace)
sb.append(" ");
sb.append(goodWord);
match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
.getMaxDocCharsToAnalyze());
}
@ -1170,10 +1170,10 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
String text = "this is a text with searchterm in it";
SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
hg.setTextFragmenter(new NullFragmenter());
hg.setMaxDocCharsToAnalyze(36);
String match = hg.getBestFragment(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
assertTrue(
"Matched text should contain remainder of text after highlighted query ",
match.endsWith("in it"));
@ -1191,7 +1191,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// test to show how rewritten query can still be used
if (searcher != null) searcher.close();
searcher = new IndexSearcher(ramDir, true);
Analyzer analyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
Query query = parser.parse("JF? or Kenned*");
@ -1446,64 +1446,64 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
Highlighter highlighter;
String result;
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("foo");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 <B>foo</B>", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("10");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("10");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed<B>10</B> foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi</B>-Speed10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-<B>Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hispeed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
// ///////////////// same tests, just put the bigger overlapping token
// first
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("foo");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 <B>foo</B>", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("10");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("10");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed<B>10</B> foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi</B>-Speed10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-<B>Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hispeed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("hi speed");
query = new QueryParser(TEST_VERSION_CURRENT, "text", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("<B>Hi-Speed</B>10 foo", result);
@ -1514,7 +1514,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private Directory dir;
private Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
private Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
public void testWeightedTermsWithDeletes() throws IOException, ParseException, InvalidTokenOffsetsException {
makeIndex();
@ -1529,7 +1529,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private void makeIndex() throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
@ -1539,7 +1539,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
}
private void deleteDocument() throws IOException {
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
writer.deleteDocuments( new Term( "t_text1", "del" ) );
// To see negative idf, keep comment the following line
//writer.optimize();
@ -1644,7 +1644,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
dir = newDirectory();
ramDir = newDirectory();
IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
for (String text : texts) {
addDoc(writer, text);
}

View File

@ -0,0 +1,60 @@
package org.apache.lucene.search.highlight;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
public class OffsetLimitTokenFilterTest extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
TokenStream stream = new MockTokenizer(new StringReader(
"short toolong evenmuchlongertext a ab toolong foo"),
MockTokenizer.WHITESPACE, false);
OffsetLimitTokenFilter filter = new OffsetLimitTokenFilter(stream, 10);
assertTokenStreamContents(filter, new String[] {"short", "toolong"});
stream = new MockTokenizer(new StringReader(
"short toolong evenmuchlongertext a ab toolong foo"),
MockTokenizer.WHITESPACE, false);
filter = new OffsetLimitTokenFilter(stream, 12);
assertTokenStreamContents(filter, new String[] {"short", "toolong"});
stream = new MockTokenizer(new StringReader(
"short toolong evenmuchlongertext a ab toolong foo"),
MockTokenizer.WHITESPACE, false);
filter = new OffsetLimitTokenFilter(stream, 30);
assertTokenStreamContents(filter, new String[] {"short", "toolong",
"evenmuchlongertext"});
checkOneTermReuse(new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new OffsetLimitTokenFilter(new MockTokenizer(reader,
MockTokenizer.WHITESPACE, false), 10);
}
}, "llenges", "llenges");
}
}

View File

@ -87,9 +87,9 @@ public abstract class AbstractTestCase extends LuceneTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
analyzerW = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
analyzerW = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
analyzerB = new BigramAnalyzer();
analyzerK = new MockAnalyzer(MockTokenizer.KEYWORD, false);
analyzerK = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
paW = new QueryParser(TEST_VERSION_CURRENT, F, analyzerW );
paB = new QueryParser(TEST_VERSION_CURRENT, F, analyzerB );
dir = newDirectory();

View File

@ -32,8 +32,7 @@ import java.util.Comparator;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.*;
import org.apache.lucene.index.values.DocValues;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.codecs.PerDocValues;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.BytesRef;
@ -391,11 +390,6 @@ public class InstantiatedIndexReader extends IndexReader {
public TermsEnum terms() {
return new InstantiatedTermsEnum(orderedTerms, upto, currentField);
}
@Override
public DocValues docValues() throws IOException {
return null;
}
};
}
@ -439,11 +433,6 @@ public class InstantiatedIndexReader extends IndexReader {
}
};
}
@Override
public DocValues docValues(String field) throws IOException {
return null;
}
};
}
@ -498,4 +487,9 @@ public class InstantiatedIndexReader extends IndexReader {
}
}
}
@Override
public PerDocValues perDocValues() throws IOException {
return null;
}
}
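This mirrors the commit's theme: docValues() disappears from the Fields/Terms level, and a reader instead exposes one per-reader hook. A hedged consumer-side sketch, assuming PerDocValues exposes a docValues(String) accessor as the new import suggests ("price" is a hypothetical field name):
PerDocValues perDoc = reader.perDocValues();  // null here: no doc values
DocValues values = (perDoc == null) ? null : perDoc.docValues("price");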

View File

@ -59,7 +59,7 @@ public class TestEmptyIndex extends LuceneTestCase {
// make sure a Directory acts the same
Directory d = newDirectory();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
r = IndexReader.open(d, false);
testNorms(r);
r.close();
@ -84,7 +84,7 @@ public class TestEmptyIndex extends LuceneTestCase {
// make sure a Directory acts the same
Directory d = newDirectory();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())).close();
new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).close();
r = IndexReader.open(d, false);
termsEnumTest(r);
r.close();

View File

@ -21,6 +21,7 @@ import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
@ -65,7 +66,7 @@ public class TestIndicesEquals extends LuceneTestCase {
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < 20; i++) {
Document document = new Document();
@ -89,9 +90,12 @@ public class TestIndicesEquals extends LuceneTestCase {
Directory dir = newDirectory();
InstantiatedIndex ii = new InstantiatedIndex();
// we need to pass the "same" random to both so that they index the same payload data.
long seed = random.nextLong();
// create dir data
IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed))).setMergePolicy(newLogMergePolicy()));
indexWriter.setInfoStream(VERBOSE ? System.out : null);
if (VERBOSE) {
System.out.println("TEST: make test index");
@ -104,7 +108,7 @@ public class TestIndicesEquals extends LuceneTestCase {
indexWriter.close();
// test ii writer
InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new MockAnalyzer(), true);
InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new MockAnalyzer(new Random(seed)), true);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
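The seed trick above, as a standalone sketch: draw one seed, then hand each consumer its own Random so both produce identical sequences:
long seed = random.nextLong();
Random r1 = new Random(seed); // for the IndexWriter's analyzer
Random r2 = new Random(seed); // for the InstantiatedIndexWriter's analyzer
// identical streams => both analyzers make the same random choices,
// so both indexes carry the same payload data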

View File

@ -36,7 +36,7 @@ public class TestRealTime extends LuceneTestCase {
InstantiatedIndex index = new InstantiatedIndex();
InstantiatedIndexReader reader = new InstantiatedIndexReader(index);
IndexSearcher searcher = newSearcher(reader);
IndexSearcher searcher = newSearcher(reader, false);
InstantiatedIndexWriter writer = new InstantiatedIndexWriter(index);
Document doc;

View File

@ -34,17 +34,17 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addDocument(iw, "Hello, world!");
addDocument(iw, "All work and no play makes jack a dull boy");
iw.close();
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
addDocument(iw, "Hello, tellus!");
addDocument(iw, "All work and no play makes danny a dull boy");
iw.close();
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
addDocument(iw, "Hello, earth!");
addDocument(iw, "All work and no play makes wendy a dull girl");
iw.close();

View File

@ -52,7 +52,7 @@ import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermVectorMapper;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.values.DocValues;
import org.apache.lucene.index.codecs.PerDocValues;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@ -807,12 +807,6 @@ public class MemoryIndex {
public TermsEnum terms() {
return new MemoryTermsEnum(sortedFields[upto].getValue());
}
@Override
public DocValues docValues() throws IOException {
// TODO
throw new UnsupportedOperationException("not implemented");
}
};
}
@ -848,12 +842,6 @@ public class MemoryIndex {
};
}
}
@Override
public DocValues docValues(String field) throws IOException {
// TODO
throw new UnsupportedOperationException("not implemented");
}
};
}
@ -1287,6 +1275,11 @@ public class MemoryIndex {
return Collections.unmodifiableSet(fields.keySet());
}
@Override
public PerDocValues perDocValues() throws IOException {
return null;
}
}

View File

@ -143,9 +143,9 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
*/
private Analyzer randomAnalyzer() {
switch(random.nextInt(3)) {
case 0: return new MockAnalyzer(MockTokenizer.SIMPLE, true);
case 1: return new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
default: return new MockAnalyzer(MockTokenizer.WHITESPACE, false);
case 0: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
case 1: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
default: return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
}
}

View File

@ -61,7 +61,7 @@ public class TestFieldNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();

View File

@ -39,7 +39,7 @@ public class TestIndexSplitter extends LuceneTestCase {
mergePolicy.setNoCFSRatio(1);
IndexWriter iw = new IndexWriter(
fsDir,
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
setOpenMode(OpenMode.CREATE).
setMergePolicy(mergePolicy)
);

View File

@ -32,7 +32,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
doc = new Document();

View File

@ -25,7 +25,7 @@ public class TestTermVectorAccessor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = newDirectory();
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Document doc;

View File

@ -30,7 +30,7 @@ import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@ -134,10 +134,10 @@ public class TestAppendingCodec extends LuceneTestCase {
public void testCodec() throws Exception {
Directory dir = new AppendingRAMDirectory(random, new RAMDirectory());
IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer());
IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random));
cfg.setCodecProvider(new AppendingCodecProvider());
((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false);
((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false);
IndexWriter writer = new IndexWriter(dir, cfg);
Document doc = new Document();
doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));

View File

@ -40,7 +40,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
public static void setUpClass() throws Exception {
dir = newDirectory();
writer = new IndexWriter(dir, newIndexWriterConfig(random,
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false))
TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
.setMaxBufferedDocs(2));
writer.setInfoStream(VERBOSE ? System.out : null);
indexDocs(writer);

View File

@ -66,7 +66,7 @@ public class TestLengthNormModifier extends LuceneTestCase {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();

View File

@ -39,7 +39,7 @@ public class BooleanFilterTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
//Add series of docs with filterable fields: access rights, prices, dates and "in-stock" flags
addDoc(writer, "admin guest", "010", "20040101","Y");

View File

@ -43,7 +43,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
//Add series of docs with filterable fields: url, text and date flags
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");

View File

@ -34,13 +34,13 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private Directory directory;
private IndexSearcher searcher;
private IndexReader reader;
private Analyzer analyzer=new MockAnalyzer();
private Analyzer analyzer=new MockAnalyzer(random);
@Override
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newInOrderLogMergePolicy()));
RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
//Add series of docs with misspelt names
addDoc(writer, "jonathon smythe","1");
@ -121,7 +121,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
}
public void testFuzzyLikeThisQueryEquals() {
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer);
fltq1.addTerms("javi", "subject", 0.5f, 2);
FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer);

View File

@ -36,8 +36,8 @@ public class TestFieldCacheRewriteMethod extends TestRegexpRandom2 {
RegexpQuery filter = new RegexpQuery(new Term("field", regexp), RegExp.NONE);
filter.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
TopDocs fieldCacheDocs = searcher.search(fieldCache, 25);
TopDocs filterDocs = searcher.search(filter, 25);
TopDocs fieldCacheDocs = searcher1.search(fieldCache, 25);
TopDocs filterDocs = searcher2.search(filter, 25);
CheckHits.checkEqual(fieldCache, fieldCacheDocs.scoreDocs, filterDocs.scoreDocs);
}

View File

@ -56,7 +56,7 @@ public class TestSpanRegexQuery extends LuceneTestCase {
public void testSpanRegex() throws Exception {
Directory directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Document doc = new Document();
// doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
// Field.Store.NO, Field.Index.ANALYZED));
@ -97,14 +97,14 @@ public class TestSpanRegexQuery extends LuceneTestCase {
// creating first index writer
IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerA.addDocument(lDoc);
writerA.optimize();
writerA.close();
// creating second index writer
IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerB.addDocument(lDoc2);
writerB.optimize();
writerB.close();

View File

@ -74,7 +74,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
Map<String,Float> originalValues = getOriginalValues();
MoreLikeThis mlt = new MoreLikeThis(reader);
mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
mlt.setMinDocFreq(1);
mlt.setMinTermFreq(1);
mlt.setMinWordLen(1);
@ -109,7 +109,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
private Map<String,Float> getOriginalValues() throws IOException {
Map<String,Float> originalValues = new HashMap<String,Float>();
MoreLikeThis mlt = new MoreLikeThis(reader);
mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
mlt.setMinDocFreq(1);
mlt.setMinTermFreq(1);
mlt.setMinWordLen(1);

View File

@ -160,7 +160,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable {
/** verify if a node contains a tag */
public boolean containsTag(String tagName) {
return this.tags.containsKey(tagName);
return this.tags.containsKey(tagName.toLowerCase());
}
public Object getTag(String tagName) {

View File

@ -34,7 +34,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestComplexPhraseQuery extends LuceneTestCase {
Directory rd;
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
DocData docsContent[] = { new DocData("john smith", "1"),
new DocData("johathon smith", "2"),

View File

@ -32,4 +32,16 @@ public class TestQueryNode extends LuceneTestCase {
bq.add(Arrays.asList(nodeB));
assertEquals(2, bq.getChildren().size());
}
/* LUCENE-3045 bug in QueryNodeImpl.containsTag(String key)*/
public void testTags() throws Exception {
QueryNode node = new FieldQueryNode("foo", "A", 0, 1);
node.setTag("TaG", new Object());
assertTrue(node.getTagMap().size() > 0);
assertTrue(node.containsTag("tAg"));
assertTrue(node.getTag("tAg") != null);
}
}
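The invariant behind LUCENE-3045: setTag lowercases the key it stores under, so containsTag and getTag must lowercase the lookup key too. Distilled from the test above:
QueryNode node = new FieldQueryNode("foo", "A", 0, 1);
node.setTag("TaG", new Object());   // stored under "tag"
assert node.containsTag("tAg");     // lookup lowercases as well
assert node.getTag("TAG") != null;  // any casing finds the same tag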

View File

@ -43,7 +43,7 @@ public class TestExtendableQueryParser extends TestQueryParser {
public QueryParser getParser(Analyzer a, Extensions extensions)
throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
QueryParser qp = extensions == null ? new ExtendableQueryParser(
TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
TEST_VERSION_CURRENT, "field", a, extensions);

View File

@ -125,7 +125,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.OR);
@ -171,7 +171,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@ -232,7 +232,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
"+(title:dog title:cat) -author:\"bob dole\"");
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(new MockAnalyzer());
qp.setAnalyzer(new MockAnalyzer(random));
// make sure OR is the default:
assertEquals(Operator.OR, qp.getDefaultOperator());
qp.setDefaultOperator(Operator.AND);
@ -246,7 +246,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
}
public void testPunct() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@ -266,7 +266,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
Analyzer a = new MockAnalyzer();
Analyzer a = new MockAnalyzer(random);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@ -405,7 +405,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer());
PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random));
Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<CharSequence,DateTools.Resolution>();
// set a field specific date resolution
@ -467,7 +467,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
}
public void testEscaped() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a\\-b:c", a, "a-b:c");
assertQueryEquals("a\\+b:c", a, "a+b:c");
@ -533,7 +533,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public void testBoost() throws Exception {
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on"));
Analyzer oneStopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true);
Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true);
PrecedenceQueryParser qp = new PrecedenceQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@ -548,7 +548,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
q = qp.parse("\"on\"^1.0", "field");
assertNotNull(q);
q = getParser(new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3",
q = getParser(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3",
"field");
assertNotNull(q);
}
@ -564,7 +564,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
getParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("one two three", "field");
getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
} catch (QueryNodeException expected) {
// too many boolean clauses, so ParseException is expected
@ -573,7 +573,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
// LUCENE-792
public void testNOT() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("NOT foo AND bar", a, "-foo +bar");
}
@ -582,7 +582,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
* issue has been corrected.
*/
public void testPrecedence() throws Exception {
PrecedenceQueryParser parser = getParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
PrecedenceQueryParser parser = getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query query1 = parser.parse("A AND B OR C AND D", "field");
Query query2 = parser.parse("(A AND B) OR (C AND D)", "field");
assertEquals(query1, query2);

View File

@ -80,7 +80,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setAnalyzer(new MockAnalyzer());
mfqp.setAnalyzer(new MockAnalyzer(random));
Query q = mfqp.parse("one", null);
assertEquals("b:one t:one", q.toString());
@ -150,7 +150,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
mfqp.setAnalyzer(new MockAnalyzer());
mfqp.setAnalyzer(new MockAnalyzer(random));
// Check for simple
Query q = mfqp.parse("one", null);
@ -178,24 +178,24 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
public void testStaticMethod1() throws QueryNodeException {
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer());
Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer());
q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -219,15 +219,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse("one", fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+b:one -t:one", q.toString());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer());
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -240,19 +240,19 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur.MUST_NOT };
StandardQueryParser parser = new StandardQueryParser();
parser.setMultiFields(fields);
parser.setAnalyzer(new MockAnalyzer());
parser.setAnalyzer(new MockAnalyzer(random));
Query q = QueryParserUtil.parse("one", fields, flags,
new MockAnalyzer());// , fields, flags, new
new MockAnalyzer(random));// , fields, flags, new
// MockAnalyzer());
assertEquals("+b:one -t:one", q.toString());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer());
q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer());
q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -265,13 +265,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = QueryParserUtil.parse(queries, fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
.parse(queries, fields, flags2, new MockAnalyzer());
.parse(queries, fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -284,13 +284,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse(queries, fields, flags,
new MockAnalyzer());
new MockAnalyzer(random));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
.parse(queries, fields, flags2, new MockAnalyzer());
.parse(queries, fields, flags2, new MockAnalyzer(random));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@ -316,7 +316,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
}
public void testStopWordSearching() throws Exception {
Analyzer analyzer = new MockAnalyzer();
Analyzer analyzer = new MockAnalyzer(random);
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
@ -342,7 +342,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
* Return empty tokens for field "f1".
*/
private static final class AnalyzerReturningNull extends Analyzer {
MockAnalyzer stdAnalyzer = new MockAnalyzer();
MockAnalyzer stdAnalyzer = new MockAnalyzer(random);
public AnalyzerReturningNull() {
}
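Condensed from the assertions above, a sketch of the multi-field parse path under the new MockAnalyzer signature:
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(new String[] { "b", "t" });
mfqp.setAnalyzer(new MockAnalyzer(random));
Query q = mfqp.parse("one", null); // -> "b:one t:one"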

View File

@ -191,7 +191,7 @@ public class TestQPHelper extends LuceneTestCase {
public StandardQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
@ -281,7 +281,7 @@ public class TestQPHelper extends LuceneTestCase {
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
a = new MockAnalyzer(MockTokenizer.SIMPLE, true);
a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@ -301,7 +301,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testConstantScoreAutoRewrite() throws Exception {
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query q = qp.parse("foo*bar", "field");
assertTrue(q instanceof WildcardQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod());
@ -410,9 +410,9 @@ public class TestQPHelper extends LuceneTestCase {
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
assertQueryEquals("t<EFBFBD>rm term term", new MockAnalyzer(MockTokenizer.WHITESPACE, false),
assertQueryEquals("t<EFBFBD>rm term term", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false),
"t<EFBFBD>rm term term");
assertQueryEquals("<EFBFBD>mlaut", new MockAnalyzer(MockTokenizer.WHITESPACE, false), "<EFBFBD>mlaut");
assertQueryEquals("<EFBFBD>mlaut", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), "<EFBFBD>mlaut");
// FIXME: change MockAnalyzer to not extend CharTokenizer for this test
//assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
@ -470,7 +470,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testPunct() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@ -491,7 +491,7 @@ public class TestQPHelper extends LuceneTestCase {
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@ -726,7 +726,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testEscaped() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@ -825,7 +825,7 @@ public class TestQPHelper extends LuceneTestCase {
}
public void testQueryStringEscaping() throws Exception {
Analyzer a = new MockAnalyzer(MockTokenizer.WHITESPACE, false);
Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@ -866,7 +866,7 @@ public class TestQPHelper extends LuceneTestCase {
@Ignore("contrib queryparser shouldn't escape wildcard terms")
public void testEscapedWildcard() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r"));
assertEquals(q, qp.parse("foo\\?ba?r", "field"));
@ -904,7 +904,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testBoost() throws Exception {
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on"));
Analyzer oneStopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true);
Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@ -920,7 +920,7 @@ public class TestQPHelper extends LuceneTestCase {
assertNotNull(q);
StandardQueryParser qp2 = new StandardQueryParser();
qp2.setAnalyzer(new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
qp2.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
q = qp2.parse("the^3", "field");
// "the" is a stop word so the result is an empty query:
@ -950,7 +950,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testCustomQueryParserWildcard() {
try {
new QPTestParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("a?t", "contents");
new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t", "contents");
fail("Wildcard queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@ -959,7 +959,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testCustomQueryParserFuzzy() throws Exception {
try {
new QPTestParser(new MockAnalyzer(MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents");
new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents");
fail("Fuzzy queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@ -970,7 +970,7 @@ public class TestQPHelper extends LuceneTestCase {
BooleanQuery.setMaxClauseCount(2);
try {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
qp.parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
@ -984,7 +984,7 @@ public class TestQPHelper extends LuceneTestCase {
*/
public void testPrecedence() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
Query query1 = qp.parse("A AND B OR C AND D", "field");
Query query2 = qp.parse("+A +B +C +D", "field");
@ -995,7 +995,7 @@ public class TestQPHelper extends LuceneTestCase {
// Todo: Convert from DateField to DateUtil
// public void testLocalDateFormat() throws IOException, QueryNodeException {
// Directory ramDir = newDirectory();
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
// iw.close();
@ -1116,7 +1116,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testStopwords() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton());
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.SIMPLE, true, stopSet, true));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true));
Query result = qp.parse("a:the OR a:foo", "a");
assertNotNull("result is null and it shouldn't be", result);
@ -1140,7 +1140,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testPositionIncrement() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
qp.setEnablePositionIncrements(true);
@ -1161,7 +1161,7 @@ public class TestQPHelper extends LuceneTestCase {
public void testMatchAllDocs() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field"));
@ -1173,7 +1173,7 @@ public class TestQPHelper extends LuceneTestCase {
private void assertHits(int expected, String query, IndexSearcher is)
throws IOException, QueryNodeException {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query, "date");
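The recurring edit in this file is mechanical: every MockAnalyzer call gains the test's Random as its first argument, so tokenizer behavior is tied to the reproducible per-test seed. Below is a minimal sketch of the migrated pattern, assuming a LuceneTestCase subclass (which supplies the static `random` field) and the import paths as of this branch; the class name, test name, and query string are illustrative, not from the patch.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.util.LuceneTestCase;

public class SeededAnalyzerExample extends LuceneTestCase {
  public void testSeededAnalyzer() throws Exception {
    // old signature: new MockAnalyzer(MockTokenizer.WHITESPACE, false)
    Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
    StandardQueryParser qp = new StandardQueryParser();
    qp.setAnalyzer(a);
    // whitespace tokenization: three clauses, one per term
    assertNotNull(qp.parse("term term term", "field"));
  }
}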

View File

@ -41,7 +41,7 @@ public class SingleFieldTestDb {
fieldName = fName;
IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(
Version.LUCENE_CURRENT,
new MockAnalyzer()));
new MockAnalyzer(random)));
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED));
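The same seeding applies outside the parser tests. A standalone sketch of the SingleFieldTestDb-style setup above, with a fixed-seed Random standing in for the one the test framework would provide; the seed, directory, field name, and document text are invented for illustration.

import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class SeededDbExample {
  public static void main(String[] args) throws Exception {
    Random random = new Random(42); // tests get this from LuceneTestCase instead
    RAMDirectory db = new RAMDirectory();
    IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(
        Version.LUCENE_CURRENT, new MockAnalyzer(random)));
    Document d = new Document();
    d.add(new Field("field", "some body text", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(d);
    writer.close();
    db.close();
  }
}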

View File

@ -71,7 +71,7 @@ public class TestCartesian extends LuceneTestCase {
super.setUp();
directory = newDirectory();
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
setUpPlotter( 2, 15);

View File

@ -47,7 +47,7 @@ public class TestDistance extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
directory = newDirectory();
writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
addData(writer);
}

View File

@ -29,7 +29,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Terms;
@ -45,7 +45,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.VirtualMethod;
/**
* <p>
@ -508,7 +507,7 @@ public class SpellChecker implements java.io.Closeable {
ensureOpen();
final Directory dir = this.spellIndex;
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).setRAMBufferSizeMB(ramMB));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(mergeFactor);
((TieredMergePolicy) writer.getConfig().getMergePolicy()).setMaxMergeAtOnce(mergeFactor);
IndexSearcher indexSearcher = obtainSearcher();
final List<TermsEnum> termsEnums = new ArrayList<TermsEnum>();
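The other recurring change in this commit swaps LogMergePolicy casts for TieredMergePolicy, mapping setMergeFactor onto setMaxMergeAtOnce. A hedged sketch of the pattern, assuming (as on trunk at this point) that TieredMergePolicy is the writer's default merge policy; the analyzer, directory, class name, and merge value are illustrative, not from the patch.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class MergePolicyMigration {
  public static void main(String[] args) throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(
        Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)));
    // before: ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(mergeFactor);
    // after: the same knob now bounds how many segments a single merge may combine
    ((TieredMergePolicy) writer.getConfig().getMergePolicy()).setMaxMergeAtOnce(10);
    writer.close();
  }
}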

View File

@ -35,7 +35,7 @@ public class TestDirectSpellChecker extends LuceneTestCase {
spellChecker.setMinQueryLength(0);
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(MockTokenizer.SIMPLE, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
for (int i = 0; i < 20; i++) {
Document doc = new Document();
@ -93,7 +93,7 @@ public class TestDirectSpellChecker extends LuceneTestCase {
public void testOptions() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir,
new MockAnalyzer(MockTokenizer.SIMPLE, true));
new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
Document doc = new Document();
doc.add(newField("text", "foobar", Field.Store.NO, Field.Index.ANALYZED));
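RandomIndexWriter now draws from the same seed source as the analyzer it wraps. A sketch assuming a LuceneTestCase subclass, which supplies `random`, newDirectory(), and newField(); the class name is illustrative, while the field values mirror the hunk above.

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

public class SeededWriterExample extends LuceneTestCase {
  public void testSeededWriter() throws Exception {
    Directory dir = newDirectory();
    // writer and analyzer share the per-test Random, so both are reproducible
    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
        new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
    Document doc = new Document();
    doc.add(newField("text", "foobar", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();
    dir.close();
  }
}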

View File

@ -46,7 +46,7 @@ public class TestLuceneDictionary extends LuceneTestCase {
public void setUp() throws Exception {
super.setUp();
store = newDirectory();
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
Document doc;

View File

@ -54,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase {
//create a user index
userindex = newDirectory();
IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for (int i = 0; i < 1000; i++) {
Document doc = new Document();

View File

@ -36,7 +36,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@ -250,7 +250,7 @@ public class Syns2Index
// override the specific index if it already exists
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
Version.LUCENE_CURRENT, ana).setOpenMode(OpenMode.CREATE));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true); // why?
((TieredMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true); // why?
Iterator<String> i1 = word2Nums.keySet().iterator();
while (i1.hasNext()) // for each word
{
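As in SpellChecker, the cast is the only change here: TieredMergePolicy exposes setUseCompoundFile just as LogMergePolicy did. A compact, hypothetical sketch under the same default-policy assumption; the analyzer, directory, and class name are invented for illustration.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class CompoundFileExample {
  public static void main(String[] args) throws Exception {
    IndexWriter writer = new IndexWriter(new RAMDirectory(),
        new IndexWriterConfig(Version.LUCENE_CURRENT,
            new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).setOpenMode(OpenMode.CREATE));
    // before: ((LogMergePolicy) ...).setUseCompoundFile(true);
    ((TieredMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
    writer.close();
  }
}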

Some files were not shown because too many files have changed in this diff.