LUCENE-2022: remove contrib deprecations

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@834707 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2009-11-10 23:22:48 +00:00
parent e1eb5df223
commit c4f2d05a03
12 changed files with 24 additions and 374 deletions

EnwikiDocMaker.java

@@ -1,37 +0,0 @@
package org.apache.lucene.benchmark.byTask.feeds;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.benchmark.byTask.utils.Config;
/**
* A {@link DocMaker} which reads the English Wikipedia dump. Uses
* {@link EnwikiContentSource} as its content source, regardless if a different
* content source was defined in the configuration.
* @deprecated Please use {@link DocMaker} instead, with content.source=EnwikiContentSource
*/
public class EnwikiDocMaker extends DocMaker {
@Override
public void setConfig(Config config) {
super.setConfig(config);
// Override whatever content source was set in the config
source = new EnwikiContentSource();
source.setConfig(config);
System.out.println("NOTE: EnwikiDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=EnwikiContentSource");
}
}
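For readers migrating: the replacement named in the deprecation tag is exactly the pattern the ExtractWikipedia change later in this commit adopts — a plain DocMaker whose content source is selected by configuration. A minimal sketch of that pattern; the wrapper class name and dump path are hypothetical, while the property names and API calls all appear elsewhere in this commit:

import java.util.Properties;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.benchmark.byTask.utils.Config;

public class EnwikiDocMakerMigration {
  public static DocMaker create(String dumpPath) throws Exception {
    DocMaker docMaker = new DocMaker();
    Properties props = new Properties();
    // Choose the content source via configuration instead of subclassing.
    props.setProperty("content.source",
        "org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource");
    props.setProperty("docs.file", dumpPath); // path to the Wikipedia dump
    docMaker.setConfig(new Config(props));
    docMaker.resetInputs();
    return docMaker;
  }
}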

LineDocMaker.java

@@ -1,49 +0,0 @@
package org.apache.lucene.benchmark.byTask.feeds;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.benchmark.byTask.utils.Config;
/**
* A DocMaker reading one line at a time as a Document from a single file. This
* saves IO cost (over DirContentSource) of recursing through a directory and
* opening a new file for every document. It also re-uses its Document and Field
* instance to improve indexing speed.<br>
* The expected format of each line is (arguments are separated by &lt;TAB&gt;):
* <i>title, date, body</i>. If a line is read in a different format, a
* {@link RuntimeException} will be thrown. In general, you should use this doc
* maker with files that were created with
* {@link org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask}.<br>
* <br>
* Config properties:
* <ul>
* <li>doc.random.id.limit=N (default -1) -- create random docid in the range
* 0..N; this is useful with UpdateDoc to test updating random documents; if
* this is unspecified or -1, then docid is sequentially assigned
* </ul>
* @deprecated Please use {@link DocMaker} instead, with content.source=LineDocSource
*/
public class LineDocMaker extends DocMaker {
@Override
public void setConfig(Config config) {
super.setConfig(config);
source = new LineDocSource();
source.setConfig(config);
System.out.println("NOTE: LineDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=LineDocSource");
}
}
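Because DocMaker is already the default doc.maker (per the console note above), the .alg-file form of this migration is just the content source line; the docs.file value below is a placeholder:

content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource
docs.file=/path/to/one-doc-per-line.txt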

PerfTask.java

@@ -69,25 +69,6 @@ public abstract class PerfTask implements Cloneable {
name = name.substring(0, name.length() - 4);
}
}
-/**
-* @deprecated will be removed in 3.0. checks if there are any obsolete
-* settings, like doc.add.log.step and doc.delete.log.step and
-* alerts the user.
-*/
-private void checkObsoleteSettings(Config config) {
-if (config.get("doc.add.log.step", null) != null) {
-throw new RuntimeException("doc.add.log.step is not supported anymore. " +
-"Use log.step.AddDoc and refer to CHANGES to read on the recent " +
-"API changes done to Benchmark's DocMaker and Task-based logging.");
-}
-if (config.get("doc.delete.log.step", null) != null) {
-throw new RuntimeException("doc.delete.log.step is not supported anymore. " +
-"Use log.step.DeleteDoc and refer to CHANGES to read on the recent " +
-"API changes done to Benchmark's DocMaker and Task-based logging.");
-}
-}
public PerfTask(PerfRunData runData) {
this();
@@ -114,7 +95,6 @@ public abstract class PerfTask implements Cloneable {
if (logStep <= 0) {
logStep = Integer.MAX_VALUE;
}
-checkObsoleteSettings(config);
}
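For anyone still carrying the obsolete settings, the replacements named in the removed error messages would sit in an .alg file along these lines; the property names are verbatim from the messages above, the values purely illustrative:

log.step.AddDoc=5000
log.step.DeleteDoc=5000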
@Override

ReadTask.java

@@ -255,14 +255,6 @@ public abstract class ReadTask extends PerfTask {
return 0;
}
-/**
-* @deprecated Use {@link #getBenchmarkHighlighter(Query)}
-*/
-final Highlighter getHighlighter(Query q) {
-// not called
-return null;
-}
/**
* Return an appropriate highlighter to be used with
* highlighting tasks
@@ -270,39 +262,6 @@ public abstract class ReadTask extends PerfTask {
protected BenchmarkHighlighter getBenchmarkHighlighter(Query q){
return null;
}
-/**
-* @return the maximum number of highlighter fragments
-* @deprecated Please define getBenchmarkHighlighter instead
-*/
-final int maxNumFragments(){
-// not called -- we switched this method to final to
-// force any external subclasses to cutover to
-// getBenchmarkHighlighter instead
-return 10;
-}
-/**
-*
-* @return true if the highlighter should merge contiguous fragments
-* @deprecated Please define getBenchmarkHighlighter instead
-*/
-final boolean isMergeContiguousFragments(){
-// not called -- we switched this method to final to
-// force any external subclasses to cutover to
-// getBenchmarkHighlighter instead
-return false;
-}
-/**
-* @deprecated Please define getBenchmarkHighlighter instead
-*/
-final int doHighlight(TokenStream ts, String text, Highlighter highlighter, boolean mergeContiguous, int maxFragments) throws IOException, InvalidTokenOffsetsException {
-// not called -- we switched this method to final to
-// force any external subclasses to cutover to
-// getBenchmarkHighlighter instead
-return 0;
-}
protected Sort getSort() {
return null;
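For an external subclass forced over by the now-final methods, the cutover looks roughly like the sketch below. It belongs inside a ReadTask subclass; the BenchmarkHighlighter callback signature and the TokenSources helper are assumptions drawn from the benchmark and highlighter APIs of this era, not taken from this diff:

// Inside a ReadTask subclass: one hook replaces the four removed methods.
@Override
protected BenchmarkHighlighter getBenchmarkHighlighter(Query q) {
  final Highlighter highlighter = new Highlighter(new QueryScorer(q));
  return new BenchmarkHighlighter() {
    @Override
    public int doHighlight(IndexReader reader, int doc, String field,
        Document document, Analyzer analyzer, String text) throws Exception {
      TokenStream ts = TokenSources.getAnyTokenStream(reader, doc, field, document, analyzer);
      // true/10 take over from the removed isMergeContiguousFragments()
      // and maxNumFragments() knobs.
      TextFragment[] frags = highlighter.getBestTextFragments(ts, text, true, 10);
      return frags.length;
    }
  };
}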

ExtractWikipedia.java

@@ -23,7 +23,6 @@ import java.io.IOException;
import java.util.Properties;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
-import org.apache.lucene.benchmark.byTask.feeds.EnwikiDocMaker;
import org.apache.lucene.benchmark.byTask.feeds.NoMoreDataException;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.document.Document;
@@ -125,16 +124,16 @@ public class ExtractWikipedia {
}
}
-DocMaker docMaker = new EnwikiDocMaker();
+DocMaker docMaker = new DocMaker();
Properties properties = new Properties();
properties.setProperty("content.source", "org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource");
properties.setProperty("docs.file", wikipedia.getAbsolutePath());
properties.setProperty("content.source.forever", "false");
properties.setProperty("keep.image.only.docs", String.valueOf(keepImageOnlyDocs));
docMaker.setConfig(new Config(properties));
docMaker.resetInputs();
if (wikipedia != null && wikipedia.exists()) {
System.out.println("Extracting Wikipedia to: " + outputDir + " using EnwikiDocMaker");
System.out.println("Extracting Wikipedia to: " + outputDir + " using EnwikiContentSource");
outputDir.mkdirs();
ExtractWikipedia extractor = new ExtractWikipedia(docMaker, outputDir);
extractor.extract();
@@ -145,9 +144,8 @@ public class ExtractWikipedia {
private static void printUsage() {
System.err.println("Usage: java -cp <...> org.apache.lucene.benchmark.utils.ExtractWikipedia --input|-i <Path to Wikipedia XML file> " +
"[--output|-o <Output Path>] [--discardImageOnlyDocs|-d] [--useLineDocMaker|-l]");
"[--output|-o <Output Path>] [--discardImageOnlyDocs|-d]");
System.err.println("--discardImageOnlyDocs tells the extractor to skip Wiki docs that contain only images");
System.err.println("--useLineDocMaker uses the LineDocMaker. Default is EnwikiDocMaker");
}
}

Highlighter.java

@@ -36,10 +36,7 @@ import org.apache.lucene.util.PriorityQueue;
public class Highlighter
{
public static final int DEFAULT_MAX_CHARS_TO_ANALYZE = 50*1024;
-/**
-* @deprecated See {@link #DEFAULT_MAX_CHARS_TO_ANALYZE}
-*/
-public static final int DEFAULT_MAX_DOC_BYTES_TO_ANALYZE=DEFAULT_MAX_CHARS_TO_ANALYZE;
private int maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE;
private Formatter formatter;
private Encoder encoder;
@@ -112,29 +109,6 @@ public class Highlighter
return null;
}
-/**
-* Highlights chosen terms in a text, extracting the most relevant sections.
-* This is a convenience method that calls
-* {@link #getBestFragments(TokenStream, String, int)}
-*
-* @param analyzer the analyzer that will be used to split <code>text</code>
-* into chunks
-* @param text text to highlight terms in
-* @param maxNumFragments the maximum number of fragments.
-* @deprecated This method incorrectly hardcodes the choice of fieldname. Use the
-* method of the same name that takes a fieldname.
-* @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
-* @throws InvalidTokenOffsetsException thrown if any token's endOffset exceeds the provided text's length
-*/
-public final String[] getBestFragments(
-Analyzer analyzer,
-String text,
-int maxNumFragments)
-throws IOException, InvalidTokenOffsetsException
-{
-TokenStream tokenStream = analyzer.tokenStream("field", new StringReader(text));
-return getBestFragments(tokenStream, text, maxNumFragments);
-}
/**
* Highlights chosen terms in a text, extracting the most relevant sections.
* This is a convenience method that calls
@@ -498,27 +472,6 @@ public class Highlighter
return result.toString();
}
-/**
-* @return the maximum number of bytes to be tokenized per doc
-*
-* @deprecated See {@link #getMaxDocCharsToAnalyze()}, since this value has always counted on chars. They both set the same internal value, however
-*/
-public int getMaxDocBytesToAnalyze()
-{
-return maxDocCharsToAnalyze;
-}
-/**
-* @param byteCount the maximum number of bytes to be tokenized per doc
-* (This can improve performance with large documents)
-*
-* @deprecated See {@link #setMaxDocCharsToAnalyze(int)}, since this value has always counted chars
-*/
-public void setMaxDocBytesToAnalyze(int byteCount)
-{
-maxDocCharsToAnalyze = byteCount;
-}
public int getMaxDocCharsToAnalyze() {
return maxDocCharsToAnalyze;
}
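Migration off the removed members is mechanical: pass the field name explicitly and count chars instead of bytes. A self-contained sketch; the wrapper class, the "contents" field name, and the fragment count of 3 are invented for illustration, while the calls themselves are the surviving API shown above:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;

public class HighlightMigration {
  public static String[] fragments(Highlighter h, Analyzer analyzer, String text)
      throws IOException, InvalidTokenOffsetsException {
    h.setMaxDocCharsToAnalyze(Highlighter.DEFAULT_MAX_CHARS_TO_ANALYZE); // chars, not bytes
    TokenStream ts = analyzer.tokenStream("contents", new StringReader(text)); // explicit field
    return h.getBestFragments(ts, text, 3);
  }
}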

TextFragment.java

@@ -37,7 +37,10 @@ public class TextFragment
this.textStartPos = textStartPos;
this.fragNum = fragNum;
}
-/** @deprecated */
+/**
+* @deprecated Use {@link #TextFragment(CharSequence, int, int)} instead.
+* This constructor will be removed in Lucene 4.0
+*/
public TextFragment(StringBuffer markedUpText,int textStartPos, int fragNum)
{
this.markedUpText=markedUpText;

HighlighterTest.java

@@ -1096,7 +1096,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter
Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
HighlighterTest.this);// new Highlighter(this, new
// QueryTermScorer(query));
-highlighter.setMaxDocBytesToAnalyze(30);
+highlighter.setMaxDocCharsToAnalyze(30);
highlighter.getBestFragment(tokenStream, texts[0]);
assertTrue("Setting MaxDocBytesToAnalyze should have prevented "
@@ -1133,10 +1133,10 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter
// new
// QueryTermScorer(query));
hg.setTextFragmenter(new NullFragmenter());
-hg.setMaxDocBytesToAnalyze(100);
+hg.setMaxDocCharsToAnalyze(100);
match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
-.getMaxDocBytesToAnalyze());
+.getMaxDocCharsToAnalyze());
// add another tokenized word to the overall length - but set way
// beyond
@@ -1147,7 +1147,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter
sb.append(goodWord);
match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
-.getMaxDocBytesToAnalyze());
+.getMaxDocCharsToAnalyze());
}
};

ChainedFilter.java

@@ -150,16 +150,14 @@ public class ChainedFilter extends Filter
return result;
}
-// TODO: in 3.0, instead of removing this deprecated
-// method, make it a no-op and mark it final
/** Provide a SortedVIntList when it is definitely
* smaller than an OpenBitSet
* @deprecated Either use CachingWrapperFilter, or
-* switch to a different DocIdSet implementation yourself. */
-protected DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
-return (result.cardinality() < (maxDocs / 9))
-? (DocIdSet) new SortedVIntList(result)
-: (DocIdSet) result;
+* switch to a different DocIdSet implementation yourself.
+* This method will be removed in Lucene 4.0
+**/
+protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
+return result;
}
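Of the two escape hatches the deprecation tag offers, the first is a one-liner: wrap the filter in core's CachingWrapperFilter and let it cache whatever DocIdSet comes back. A sketch under that assumption; the helper class and the choice of OR logic are illustrative only:

import org.apache.lucene.misc.ChainedFilter;
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.search.Filter;

public class ChainedFilterMigration {
  public static Filter cachedOr(Filter a, Filter b) {
    Filter chained = new ChainedFilter(new Filter[] { a, b }, ChainedFilter.OR);
    return new CachingWrapperFilter(chained); // caches the DocIdSet per reader
  }
}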

LengthNormModifier.java

@@ -1,153 +0,0 @@
package org.apache.lucene.misc;
/**
* Copyright 2006 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.StringHelper;
import java.io.File;
import java.io.IOException;
import java.util.Date;
/**
* Given a directory, a Similarity, and a list of fields, updates the
* fieldNorms in place for every document using the Similarity.lengthNorm.
*
* <p>
* NOTE: This only works if you do <b>not</b> use field/document boosts in your
* index.
* </p>
*
* @version $Id$
* @deprecated Use {@link org.apache.lucene.index.FieldNormModifier}
*/
public class LengthNormModifier {
/**
* Command Line Execution method.
*
* <pre>
* Usage: LengthNormModifier /path/index package.SimilarityClassName field1 field2 ...
* </pre>
*/
public static void main(String[] args) throws IOException {
if (args.length < 3) {
System.err.println("Usage: LengthNormModifier <index> <package.SimilarityClassName> <field1> [field2] ...");
System.exit(1);
}
Similarity s = null;
try {
s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
} catch (Exception e) {
System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
e.printStackTrace(System.err);
}
File index = new File(args[0]);
Directory d = FSDirectory.open(index);
LengthNormModifier lnm = new LengthNormModifier(d, s);
for (int i = 2; i < args.length; i++) {
System.out.print("Updating field: " + args[i] + " " + (new Date()).toString() + " ... ");
lnm.reSetNorms(args[i]);
System.out.println(new Date().toString());
}
d.close();
}
private Directory dir;
private Similarity sim;
/**
* Constructor for code that wishes to use this class programmatically.
*
* @param d The Directory to modify
* @param s The Similarity to use in <code>reSetNorms</code>
*/
public LengthNormModifier(Directory d, Similarity s) {
dir = d;
sim = s;
}
/**
* Resets the norms for the specified field.
*
* <p>
* Opens a new IndexReader on the Directory given to this instance,
* modifies the norms using the Similarity given to this instance,
* and closes the IndexReader.
* </p>
*
* @param field the field whose norms should be reset
*/
public void reSetNorms(String field) throws IOException {
String fieldName = StringHelper.intern(field);
int[] termCounts = new int[0];
IndexReader reader = null;
TermEnum termEnum = null;
TermDocs termDocs = null;
try {
reader = IndexReader.open(dir, false);
termCounts = new int[reader.maxDoc()];
try {
termEnum = reader.terms(new Term(field));
try {
termDocs = reader.termDocs();
do {
Term term = termEnum.term();
if (term != null && term.field().equals(fieldName)) {
termDocs.seek(termEnum.term());
while (termDocs.next()) {
termCounts[termDocs.doc()] += termDocs.freq();
}
}
} while (termEnum.next());
} finally {
if (null != termDocs) termDocs.close();
}
} finally {
if (null != termEnum) termEnum.close();
}
} finally {
if (null != reader) reader.close();
}
try {
reader = IndexReader.open(dir, false);
for (int d = 0; d < termCounts.length; d++) {
if (! reader.isDeleted(d)) {
byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
reader.setNorm(d, fieldName, norm);
}
}
} finally {
if (null != reader) reader.close();
}
}
}
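The documented replacement mirrors this class closely. A sketch assuming org.apache.lucene.index.FieldNormModifier keeps the (Directory, Similarity) constructor and per-field reSetNorms of the class removed here; the index path and field name are placeholders:

import java.io.File;
import org.apache.lucene.index.FieldNormModifier;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class NormRewrite {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File("/path/to/index"));
    FieldNormModifier fnm = new FieldNormModifier(dir, new DefaultSimilarity());
    fnm.reSetNorms("body"); // recompute norms for one example field
    dir.close();
  }
}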

BooleanFilter.java

@@ -112,16 +112,14 @@ public class BooleanFilter
return DocIdSet.EMPTY_DOCIDSET;
}
-// TODO: in 3.0, instead of removing this deprecated
-// method, make it a no-op and mark it final
/** Provide a SortedVIntList when it is definitely smaller
* than an OpenBitSet.
* @deprecated Either use CachingWrapperFilter, or
-* switch to a different DocIdSet implementation yourself. */
-protected DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
-return (result.cardinality() < (maxDocs / 9))
-? (DocIdSet) new SortedVIntList(result)
-: (DocIdSet) result;
+* switch to a different DocIdSet implementation yourself.
+* This method will be removed in Lucene 4.0
+*/
+protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
+return result;
}
/**

DistanceApproximation.java

@@ -25,7 +25,7 @@ package org.apache.lucene.spatial.geometry.shape;
* release.</font>
*
* @deprecated This has been replaced with more accurate
-* math in {@link LLRect}.
+* math in {@link LLRect}. This class will be removed in a future release.
*/
public class DistanceApproximation
{