nuke more contrib references

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1328874 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-04-22 12:59:34 +00:00
parent 0bf1f362eb
commit 29f71547d3
9 changed files with 12 additions and 12 deletions


@@ -31,6 +31,6 @@ using this Analyzer you are NOT affected.
 * SimpleAnalyzer, StopAnalyzer, LetterTokenizer, LowerCaseFilter, and
 LowerCaseTokenizer may return different results, along with many other Analyzers
-and TokenStreams in Lucene's contrib area. If you are using one of these
+and TokenStreams in Lucene's analysis modules. If you are using one of these
 components, you may be affected.


@@ -51,8 +51,8 @@ import org.apache.lucene.util.AttributeSource;
 * </li>
 * </ol>
 * <p>
-* The <code>ICUCollationAttributeFactory</code> in the icu package of Lucene's
-* contrib area uses ICU4J's Collator, which makes its
+* The <code>ICUCollationAttributeFactory</code> in the analysis-icu package
+* uses ICU4J's Collator, which makes its
 * version available, thus allowing collation to be versioned independently
 * from the JVM. ICUCollationAttributeFactory is also significantly faster and
 * generates significantly shorter keys than CollationAttributeFactory. See
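For orientation, a minimal sketch of how such an attribute factory is typically combined with a KeywordTokenizer; the locale, sample text, and Lucene 4.x constructor signatures here are assumptions for illustration, not taken from the files above:

```java
import java.io.StringReader;

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.collation.ICUCollationAttributeFactory;

public class CollationFactorySketch {
  public static void main(String[] args) throws Exception {
    // ICU4J collator for French; its rules are versioned by ICU4J,
    // not by the JVM, which is the point the javadoc above is making.
    Collator collator = Collator.getInstance(new ULocale("fr", "FR"));

    // Factory that stores each token's ICU collation key in its term attribute.
    ICUCollationAttributeFactory factory = new ICUCollationAttributeFactory(collator);

    // One whole-string token whose term bytes now carry the sort key.
    TokenStream stream = new KeywordTokenizer(
        factory, new StringReader("résumé"), KeywordTokenizer.DEFAULT_BUFFER_SIZE);
    stream.reset();
    while (stream.incrementToken()) {
      // index this stream as the sort field
    }
    stream.end();
    stream.close();
  }
}
```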


@@ -58,8 +58,8 @@ import java.io.Reader;
 * </li>
 * </ol>
 * <p>
-* The <code>ICUCollationKeyAnalyzer</code> in the icu package of Lucene's
-* contrib area uses ICU4J's Collator, which makes its
+* The <code>ICUCollationKeyAnalyzer</code> in the analysis-icu package
+* uses ICU4J's Collator, which makes its
 * its version available, thus allowing collation to be versioned
 * independently from the JVM. ICUCollationKeyAnalyzer is also significantly
 * faster and generates significantly shorter keys than CollationKeyAnalyzer.
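A hedged sketch of the analyzer-level equivalent, assuming the Lucene 4.x ICUCollationKeyAnalyzer(Version, Collator) constructor; the locale and the way the analyzer gets used are illustrative only:

```java
import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
import org.apache.lucene.util.Version;

public class CollationAnalyzerSketch {
  public static void main(String[] args) {
    // Collation rules come from ICU4J, so they do not change when the JVM does.
    Collator collator = Collator.getInstance(new ULocale("de", "DE"));

    // Each token is replaced by its collation key, so a field indexed with this
    // analyzer sorts and range-queries in German collation order.
    Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_40, collator);

    // Use the same analyzer (and collator) at index time and at search time.
    System.out.println("collation analyzer ready: " + analyzer);
  }
}
```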


@@ -55,8 +55,8 @@ import java.text.Collator;
 * </li>
 * </ol>
 * <p>
-* The <code>ICUCollationKeyFilter</code> in the icu package of Lucene's
-* contrib area uses ICU4J's Collator, which makes its
+* The <code>ICUCollationKeyFilter</code> in the analysis-icu package
+* uses ICU4J's Collator, which makes its
 * version available, thus allowing collation to be versioned independently
 * from the JVM. ICUCollationKeyFilter is also significantly faster and
 * generates significantly shorter keys than CollationKeyFilter. See


@@ -325,7 +325,7 @@ public class MemoryIndex {
 * Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
 * Lucene {@link org.apache.lucene.document.Field}.
 * Finally closes the token stream. Note that untokenized keywords can be added with this method via
-* {@link #keywordTokenStream(Collection)}, the Lucene contrib <code>KeywordTokenizer</code> or similar utilities.
+* {@link #keywordTokenStream(Collection)}, the Lucene <code>KeywordTokenizer</code> or similar utilities.
 *
 * @param fieldName
 * a name to be associated with the text
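As a rough sketch of the surrounding MemoryIndex API (the field names, sample text, and analyzer choice are assumptions, not part of the files above), addField combines with keywordTokenStream roughly like this:

```java
import java.util.Arrays;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.Version;

public class MemoryIndexSketch {
  public static void main(String[] args) {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    MemoryIndex index = new MemoryIndex();

    // Tokenized, indexed field, analogous to an analyzed document Field.
    index.addField("content", "readings about salmon and other fish", analyzer);

    // Untokenized keywords added via keywordTokenStream, as the javadoc notes.
    index.addField("category", index.keywordTokenStream(Arrays.asList("fish", "food")));

    // Score the single in-memory document against an ad-hoc query.
    float score = index.search(new TermQuery(new Term("content", "fish")));
    System.out.println(score > 0.0f ? "match, score=" + score : "no match");
  }
}
```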


@@ -20,7 +20,7 @@
 <project name="module-build" xmlns:artifact="antlib:org.apache.maven.artifact.ant">
 <echo>Building ${ant.project.name}...</echo>
-<!-- TODO: adjust build.dir/dist.dir appropriately when a contrib project is run individually -->
+<!-- TODO: adjust build.dir/dist.dir appropriately when a module is run individually -->
 <dirname file="${ant.file.module-build}" property="module-build.dir"/>
 <property name="build.dir" location="${module-build.dir}/build/${ant.project.name}"/>
 <property name="dist.dir" location="${module-build.dir}/dist/${ant.project.name}"/>


@@ -30,6 +30,6 @@
 <li>A modular design with expandable support for new query/filter types</li>
 </ul>
 </p>
-<p>This code is dependent on the "queries" contrib module although the "CoreParser" can be compiled with just Lucene core if required</p>
+<p>This code is dependent on the "queries" module although the "CoreParser" can be compiled with just Lucene core if required</p>
 </body>
 </html>
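A sketch of the CoreParser usage this package overview describes, under the assumption that the class lives at org.apache.lucene.queryparser.xml after the module moves and that an XML query document is available on the classpath (the resource name below is hypothetical):

```java
import java.io.InputStream;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.xml.CoreParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class XmlQuerySketch {
  public static void main(String[] args) throws Exception {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

    // "contents" is the default field applied when the XML elements do not name one.
    CoreParser parser = new CoreParser("contents", analyzer);

    // Hypothetical query document; the parser builds a Query from the XML tree.
    InputStream xml = XmlQuerySketch.class.getResourceAsStream("/BooleanQuery.xml");
    Query query = parser.parse(xml);
    System.out.println(query);
  }
}
```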


@@ -893,7 +893,7 @@ public class TestQPHelper extends LuceneTestCase {
 assertEscapedQueryEquals("&& abc &&", a, "\\&\\& abc \\&\\&");
 }
-@Ignore("contrib queryparser shouldn't escape wildcard terms")
+@Ignore("flexible queryparser shouldn't escape wildcard terms")
 public void testEscapedWildcard() throws Exception {
 StandardQueryParser qp = new StandardQueryParser();
 qp.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
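For context, a hedged sketch of how the flexible query parser (StandardQueryParser) treats wildcard syntax; the query string, field name, and analyzer choice are made up for illustration:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class FlexibleParserSketch {
  public static void main(String[] args) throws Exception {
    StandardQueryParser qp = new StandardQueryParser();
    qp.setAnalyzer(new StandardAnalyzer(Version.LUCENE_40));

    // The '*' stays wildcard syntax; the parser does not escape it away.
    Query q = qp.parse("foo*bar", "field");
    System.out.println(q.getClass().getSimpleName() + ": " + q);
  }
}
```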


@@ -52,7 +52,7 @@ import org.apache.lucene.util.automaton.RegExp;
 * Base Test class for QueryParser subclasses
 */
 // TODO: it would be better to refactor the parts that are specific really
-// to the core QP and subclass/use the parts that are not in the contrib QP
+// to the core QP and subclass/use the parts that are not in the flexible QP
 public abstract class QueryParserTestBase extends LuceneTestCase {
 public static Analyzer qpAnalyzer = new QPTestAnalyzer();