nuke more contrib references

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1328874 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-04-22 12:59:34 +00:00
parent 0bf1f362eb
commit 29f71547d3
9 changed files with 12 additions and 12 deletions

View File

@@ -31,6 +31,6 @@ using this Analyzer you are NOT affected.
 * SimpleAnalyzer, StopAnalyzer, LetterTokenizer, LowerCaseFilter, and
 LowerCaseTokenizer may return different results, along with many other Analyzers
-and TokenStreams in Lucene's contrib area. If you are using one of these
+and TokenStreams in Lucene's analysis modules. If you are using one of these
 components, you may be affected.
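
For context (not part of this commit): the behavior difference this migration note describes is typically gated on the Version (matchVersion) constant these components accept. The sketch below assumes the Lucene 3.1+/4.x constructors that take a Version and the 4.x package layout (org.apache.lucene.analysis.core); it just prints the tokens produced for a chosen matchVersion.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class MatchVersionSketch {
  public static void main(String[] args) throws Exception {
    // matchVersion selects which analysis behavior is emulated; bumping it
    // (e.g. from LUCENE_30 to LUCENE_40) may change the tokens these analyzers return.
    SimpleAnalyzer analyzer = new SimpleAnalyzer(Version.LUCENE_40);
    TokenStream ts = analyzer.tokenStream("field", new StringReader("Tübingen Fish-Market 42"));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString());
    }
    ts.end();
    ts.close();
  }
}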

View File

@@ -51,8 +51,8 @@ import org.apache.lucene.util.AttributeSource;
  * </li>
  * </ol>
  * <p>
- * The <code>ICUCollationAttributeFactory</code> in the icu package of Lucene's
- * contrib area uses ICU4J's Collator, which makes its
+ * The <code>ICUCollationAttributeFactory</code> in the analysis-icu package
+ * uses ICU4J's Collator, which makes its
  * version available, thus allowing collation to be versioned independently
  * from the JVM. ICUCollationAttributeFactory is also significantly faster and
  * generates significantly shorter keys than CollationAttributeFactory. See
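
For reference (not from this commit), a minimal sketch of the factory-based pattern the Javadoc describes, assuming the 4.x APIs: ICUCollationAttributeFactory(Collator) from the analysis-icu module and the KeywordTokenizer constructor that accepts an AttributeFactory. The single token the tokenizer emits carries an ICU collation key as its term bytes.

import java.io.StringReader;

import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.collation.ICUCollationAttributeFactory;

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class CollationFactorySketch {
  public static void main(String[] args) throws Exception {
    // ICU Collator for German; its version travels with the index configuration
    // rather than depending on the JVM's java.text.Collator.
    Collator collator = Collator.getInstance(new ULocale("de", "DE"));
    ICUCollationAttributeFactory factory = new ICUCollationAttributeFactory(collator);

    // Single-token stream whose term encodes the collation key of the whole input.
    KeywordTokenizer tokenizer = new KeywordTokenizer(
        factory, new StringReader("Tübingen"), KeywordTokenizer.DEFAULT_BUFFER_SIZE);
    tokenizer.reset();
    while (tokenizer.incrementToken()) {
      // consume; the bytes that reach the index are collation keys
    }
    tokenizer.end();
    tokenizer.close();
  }
}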

View File

@@ -58,8 +58,8 @@ import java.io.Reader;
  * </li>
  * </ol>
  * <p>
- * The <code>ICUCollationKeyAnalyzer</code> in the icu package of Lucene's
- * contrib area uses ICU4J's Collator, which makes its
+ * The <code>ICUCollationKeyAnalyzer</code> in the analysis-icu package
+ * uses ICU4J's Collator, which makes its
  * its version available, thus allowing collation to be versioned
  * independently from the JVM. ICUCollationKeyAnalyzer is also significantly
  * faster and generates significantly shorter keys than CollationKeyAnalyzer.
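
Again for reference only: a sketch of how this analyzer is typically wired up, with a dedicated sort field getting ICUCollationKeyAnalyzer while other fields keep a normal analyzer. Assumptions: the 4.x constructor ICUCollationKeyAnalyzer(Version, Collator) and the 4.x PerFieldAnalyzerWrapper(Analyzer, Map) constructor from analyzers-common; "title_sort" is a made-up field name.

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
import org.apache.lucene.util.Version;

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class CollationAnalyzerSketch {
  public static Analyzer build() {
    Collator collator = Collator.getInstance(new ULocale("de", "DE"));

    // "title_sort" (hypothetical field) is indexed as a locale-aware collation key;
    // all other fields fall back to StandardAnalyzer.
    Map<String, Analyzer> perField = new HashMap<String, Analyzer>();
    perField.put("title_sort", new ICUCollationKeyAnalyzer(Version.LUCENE_40, collator));
    return new PerFieldAnalyzerWrapper(new StandardAnalyzer(Version.LUCENE_40), perField);
  }
}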

View File

@@ -55,8 +55,8 @@ import java.text.Collator;
  * </li>
  * </ol>
  * <p>
- * The <code>ICUCollationKeyFilter</code> in the icu package of Lucene's
- * contrib area uses ICU4J's Collator, which makes its
+ * The <code>ICUCollationKeyFilter</code> in the analysis-icu package
+ * uses ICU4J's Collator, which makes its
  * version available, thus allowing collation to be versioned independently
  * from the JVM. ICUCollationKeyFilter is also significantly faster and
  * generates significantly shorter keys than CollationKeyFilter. See
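
For reference, the TokenFilter-based style this class represents (not from this commit): the filter wraps an existing single-token stream and rewrites the term into its ICU collation key. Assumed here: the ICUCollationKeyFilter(TokenStream, Collator) constructor and the plain KeywordTokenizer(Reader) constructor from the 4.x analysis modules.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.collation.ICUCollationKeyFilter;

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class CollationFilterSketch {
  public static TokenStream collationKeys(String text) {
    // French collator; the filter replaces the single keyword token's text
    // with its ICU collation key.
    Collator collator = Collator.getInstance(new ULocale("fr", "FR"));
    TokenStream keyword = new KeywordTokenizer(new StringReader(text));
    return new ICUCollationKeyFilter(keyword, collator);
  }
}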

View File

@@ -325,7 +325,7 @@ public class MemoryIndex {
  * Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
  * Lucene {@link org.apache.lucene.document.Field}.
  * Finally closes the token stream. Note that untokenized keywords can be added with this method via
- * {@link #keywordTokenStream(Collection)}, the Lucene contrib <code>KeywordTokenizer</code> or similar utilities.
+ * {@link #keywordTokenStream(Collection)}, the Lucene <code>KeywordTokenizer</code> or similar utilities.
  *
  * @param fieldName
  * a name to be associated with the text
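
The MemoryIndex Javadoc above is easier to follow with a usage sketch (not part of this commit). It assumes the 4.x module layout (memory, analyzers-common, queryparser) and mirrors the documented pattern: addField(String, String, Analyzer) for analyzed text, addField(String, TokenStream) with keywordTokenStream(Collection) for untokenized keywords, then search(Query) for an in-memory match score.

import java.util.Arrays;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.util.Version;

public class MemoryIndexSketch {
  public static void main(String[] args) throws Exception {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    MemoryIndex index = new MemoryIndex();

    // Tokenized, indexed, unstored field built from plain text.
    index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);

    // Untokenized keywords added via keywordTokenStream(Collection).
    index.addField("tag", index.keywordTokenStream(Arrays.asList("lucene", "memory")));

    QueryParser parser = new QueryParser(Version.LUCENE_40, "content", analyzer);
    float score = index.search(parser.parse("+content:alaska +tag:lucene"));
    System.out.println(score > 0.0f ? "match, score=" + score : "no match");
  }
}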

View File

@@ -20,7 +20,7 @@
 <project name="module-build" xmlns:artifact="antlib:org.apache.maven.artifact.ant">
   <echo>Building ${ant.project.name}...</echo>
-  <!-- TODO: adjust build.dir/dist.dir appropriately when a contrib project is run individually -->
+  <!-- TODO: adjust build.dir/dist.dir appropriately when a module is run individually -->
   <dirname file="${ant.file.module-build}" property="module-build.dir"/>
   <property name="build.dir" location="${module-build.dir}/build/${ant.project.name}"/>
   <property name="dist.dir" location="${module-build.dir}/dist/${ant.project.name}"/>

View File

@@ -30,6 +30,6 @@
 <li>A modular design with expandable support for new query/filter types</li>
 </ul>
 </p>
-<p>This code is dependent on the "queries" contrib module although the "CoreParser" can be compiled with just Lucene core if required</p>
+<p>This code is dependent on the "queries" module although the "CoreParser" can be compiled with just Lucene core if required</p>
 </body>
 </html>
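
For context (not from this commit), a sketch of the split the overview sentence describes: CoreParser alone covers the core query/filter tags and needs only Lucene core, while the richer parser types depend on the queries module. Assumptions: the 4.x package org.apache.lucene.queryparser.xml, the CoreParser(String, Analyzer) constructor, and a hypothetical query.xml classpath resource.

import java.io.InputStream;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.xml.CoreParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class XmlQueryParserSketch {
  public static void main(String[] args) throws Exception {
    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

    // CoreParser handles the core query/filter elements without the queries module.
    CoreParser parser = new CoreParser("contents", analyzer);

    // "query.xml" is a made-up resource containing e.g. a <BooleanQuery> element.
    InputStream xml = XmlQueryParserSketch.class.getResourceAsStream("/query.xml");
    Query query = parser.parse(xml);
    System.out.println(query);
  }
}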

View File

@@ -893,7 +893,7 @@ public class TestQPHelper extends LuceneTestCase {
     assertEscapedQueryEquals("&& abc &&", a, "\\&\\& abc \\&\\&");
   }
-  @Ignore("contrib queryparser shouldn't escape wildcard terms")
+  @Ignore("flexible queryparser shouldn't escape wildcard terms")
   public void testEscapedWildcard() throws Exception {
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
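
To illustrate the reasoning in the renamed @Ignore message (again, not part of the commit): with the flexible StandardQueryParser, a term containing * or ? is expected to remain a wildcard rather than being escaped. A minimal sketch, assuming the 4.x packages org.apache.lucene.queryparser.flexible.standard and org.apache.lucene.analysis.core:

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class WildcardParseSketch {
  public static void main(String[] args) throws Exception {
    StandardQueryParser qp = new StandardQueryParser();
    qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_40));

    // The '*' should survive as a wildcard (typically a WildcardQuery), not be escaped.
    Query q = qp.parse("foo*bar", "field");
    System.out.println(q.getClass().getSimpleName() + ": " + q);
  }
}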

View File

@@ -52,7 +52,7 @@ import org.apache.lucene.util.automaton.RegExp;
  * Base Test class for QueryParser subclasses
  */
 // TODO: it would be better to refactor the parts that are specific really
-// to the core QP and subclass/use the parts that are not in the contrib QP
+// to the core QP and subclass/use the parts that are not in the flexible QP
 public abstract class QueryParserTestBase extends LuceneTestCase {
   public static Analyzer qpAnalyzer = new QPTestAnalyzer();