LUCENE-6080: remove some deprecations from trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1642374 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2014-11-28 23:05:14 +00:00
parent 20e0d50186
commit 46e8f846c5
24 changed files with 28 additions and 642 deletions


@@ -1,72 +0,0 @@
package org.apache.lucene.analysis.miscellaneous;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenStream} containing a single token.
* @deprecated Do not use this anymore!
*/
@Deprecated
public final class SingleTokenTokenStream extends TokenStream {
private boolean exhausted = false;
// The token needs to be immutable, so work with clones!
private Token singleToken;
private final AttributeImpl tokenAtt;
public SingleTokenTokenStream(Token token) {
super(Token.TOKEN_ATTRIBUTE_FACTORY);
assert token != null;
this.singleToken = token.clone();
tokenAtt = (AttributeImpl) addAttribute(CharTermAttribute.class);
assert (tokenAtt instanceof Token);
}
@Override
public final boolean incrementToken() {
if (exhausted) {
return false;
} else {
clearAttributes();
singleToken.copyTo(tokenAtt);
exhausted = true;
return true;
}
}
@Override
public void reset() {
exhausted = false;
}
public Token getToken() {
return singleToken.clone();
}
public void setToken(Token token) {
this.singleToken = token.clone();
}
}

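The tests below switch to the test-framework CannedTokenStream as the replacement. A minimal migration sketch, assuming the CannedTokenStream(Token...) varargs constructor and an illustrative token:

// before (removed in this commit)
TokenStream ts = new SingleTokenTokenStream(new Token("foo", 0, 3));
// after
TokenStream ts = new CannedTokenStream(new Token("foo", 0, 3)); // illustrative token values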

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.miscellaneous;
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
@@ -31,9 +32,9 @@ public class TestPrefixAndSuffixAwareTokenFilter extends BaseTokenStreamTestCase
final MockTokenizer input = new MockTokenizer(MockTokenizer.WHITESPACE, false);
input.setReader(new StringReader("hello world"));
PrefixAndSuffixAwareTokenFilter ts = new PrefixAndSuffixAwareTokenFilter(
new SingleTokenTokenStream(createToken("^", 0, 0)),
new CannedTokenStream(createToken("^", 0, 0)),
input,
new SingleTokenTokenStream(createToken("$", 0, 0)));
new CannedTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
new String[] { "^", "hello", "world", "$" },


@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.miscellaneous;
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
@@ -31,8 +32,8 @@ public class TestPrefixAwareTokenFilter extends BaseTokenStreamTestCase {
PrefixAwareTokenFilter ts;
ts = new PrefixAwareTokenFilter(
new SingleTokenTokenStream(createToken("a", 0, 1)),
new SingleTokenTokenStream(createToken("b", 0, 1)));
new CannedTokenStream(createToken("a", 0, 1)),
new CannedTokenStream(createToken("b", 0, 1)));
assertTokenStreamContents(ts,
new String[] { "a", "b" },
new int[] { 0, 1 },
@@ -42,9 +43,9 @@ public class TestPrefixAwareTokenFilter extends BaseTokenStreamTestCase {
final MockTokenizer suffix = new MockTokenizer(MockTokenizer.WHITESPACE, false);
suffix.setReader(new StringReader("hello world"));
ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(createToken("^", 0, 0)),
ts = new PrefixAwareTokenFilter(new CannedTokenStream(createToken("^", 0, 0)),
suffix);
ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(createToken("$", 0, 0)));
ts = new PrefixAwareTokenFilter(ts, new CannedTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
new String[] { "^", "hello", "world", "$" },


@@ -1,49 +0,0 @@
package org.apache.lucene.analysis.miscellaneous;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
public class TestSingleTokenTokenFilter extends LuceneTestCase {
public void test() throws IOException {
Token token = new Token();
SingleTokenTokenStream ts = new SingleTokenTokenStream(token);
AttributeImpl tokenAtt = (AttributeImpl) ts.addAttribute(CharTermAttribute.class);
assertTrue(tokenAtt instanceof Token);
ts.reset();
assertTrue(ts.incrementToken());
assertEquals(token, tokenAtt);
assertFalse(ts.incrementToken());
token = new Token("hallo", 10, 20);
token.setType("someType");
ts.setToken(token);
ts.reset();
assertTrue(ts.incrementToken());
assertEquals(token, tokenAtt);
assertFalse(ts.incrementToken());
}
}


@@ -62,14 +62,14 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
// test that subwords and catenated subwords have
// the correct offsets.
WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("foo-bar", 5, 12)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "foobar", "bar" },
new int[] { 5, 5, 9 },
new int[] { 8, 12, 12 });
wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("foo-bar", 5, 6)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "bar", "foobar" },
@@ -80,7 +80,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
@Test
public void testOffsetChange() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("übelkeit)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -91,7 +91,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
@Test
public void testOffsetChange2() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(übelkeit", 7, 17)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -102,7 +102,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
@Test
public void testOffsetChange3() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(übelkeit", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "übelkeit" },
@@ -113,7 +113,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
@Test
public void testOffsetChange4() throws Exception {
int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
WordDelimiterFilter wdf = new WordDelimiterFilter(new SingleTokenTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
WordDelimiterFilter wdf = new WordDelimiterFilter(new CannedTokenStream(new Token("(foo,bar)", 7, 16)), DEFAULT_WORD_DELIM_TABLE, flags, null);
assertTokenStreamContents(wdf,
new String[] { "foo", "foobar", "bar"},


@@ -78,14 +78,6 @@ public final class ICUCollationKeyAnalyzer extends Analyzer {
this.factory = new ICUCollationAttributeFactory(collator);
}
/**
* @deprecated Use {@link #ICUCollationKeyAnalyzer(Collator)}
*/
@Deprecated
public ICUCollationKeyAnalyzer(Version matchVersion, Collator collator) {
this.factory = new ICUCollationAttributeFactory(collator);
}
@Override
protected TokenStreamComponents createComponents(String fieldName) {

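With the Version overload gone, the analyzer is built directly from an ICU Collator, as the Solr change further below shows. A minimal sketch, assuming com.ibm.icu.text.Collator and an illustrative locale:

Collator collator = Collator.getInstance(new ULocale("de", "DE")); // illustrative locale
Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);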

@@ -47,10 +47,6 @@ public class MorfologikFilterFactory extends TokenFilterFactory {
*/
private final String dictionaryResource;
/** Schema attribute. */
@Deprecated
public static final String DICTIONARY_SCHEMA_ATTRIBUTE = "dictionary";
/** Dictionary resource */
public static final String DICTIONARY_RESOURCE_ATTRIBUTE = "dictionary-resource";
@@ -58,14 +54,6 @@ public class MorfologikFilterFactory extends TokenFilterFactory {
public MorfologikFilterFactory(Map<String,String> args) {
super(args);
// Be specific about no-longer-supported dictionary attribute.
String dictionaryName = get(args, DICTIONARY_SCHEMA_ATTRIBUTE);
if (dictionaryName != null && !dictionaryName.isEmpty()) {
throw new IllegalArgumentException("The " + DICTIONARY_SCHEMA_ATTRIBUTE + " attribute is no "
+ "longer supported (Morfologik now offers one unified Polish dictionary): " + dictionaryName
+ ". Perhaps you wanted to use 'dictionary-resource' attribute instead?");
}
dictionaryResource = get(args, DICTIONARY_RESOURCE_ATTRIBUTE, DEFAULT_DICTIONARY_RESOURCE);
if (!args.isEmpty()) {


@@ -1,105 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.cn.smart;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.hhmm.SegToken;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
/**
* A {@link TokenFilter} that breaks sentences into words.
* @lucene.experimental
* @deprecated Use {@link HMMChineseTokenizer} instead.
*/
@Deprecated
public final class WordTokenFilter extends TokenFilter {
private WordSegmenter wordSegmenter;
private Iterator<SegToken> tokenIter;
private List<SegToken> tokenBuffer;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
private int tokStart; // only used if the length changed before this filter
private int tokEnd; // only used if the length changed before this filter
private boolean hasIllegalOffsets; // only if the length changed before this filter
/**
* Construct a new WordTokenizer.
*
* @param in {@link TokenStream} of sentences
*/
public WordTokenFilter(TokenStream in) {
super(in);
this.wordSegmenter = new WordSegmenter();
}
@Override
public boolean incrementToken() throws IOException {
if (tokenIter == null || !tokenIter.hasNext()) {
// there are no remaining tokens from the current sentence... are there more sentences?
if (input.incrementToken()) {
tokStart = offsetAtt.startOffset();
tokEnd = offsetAtt.endOffset();
// if length by start + end offsets doesn't match the term text then assume
// this is a synonym and don't adjust the offsets.
hasIllegalOffsets = (tokStart + termAtt.length()) != tokEnd;
// a new sentence is available: process it.
tokenBuffer = wordSegmenter.segmentSentence(termAtt.toString(), offsetAtt.startOffset());
tokenIter = tokenBuffer.iterator();
/*
* it should not be possible to have a sentence with 0 words, check just in case.
* returning EOS isn't the best either, but its the behavior of the original code.
*/
if (!tokenIter.hasNext())
return false;
} else {
return false; // no more sentences, end of stream!
}
}
// WordTokenFilter must clear attributes, as it is creating new tokens.
clearAttributes();
// There are remaining tokens from the current sentence, return the next one.
SegToken nextWord = tokenIter.next();
termAtt.copyBuffer(nextWord.charArray, 0, nextWord.charArray.length);
if (hasIllegalOffsets) {
offsetAtt.setOffset(tokStart, tokEnd);
} else {
offsetAtt.setOffset(nextWord.startOffset, nextWord.endOffset);
}
typeAtt.setType("word");
return true;
}
@Override
public void reset() throws IOException {
super.reset();
tokenIter = null;
}
}

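The deprecation note points to HMMChineseTokenizer, which performs the sentence-to-word segmentation as a Tokenizer rather than a TokenFilter. A minimal replacement sketch, assuming the smartcn module's no-arg HMMChineseTokenizer constructor:

Analyzer a = new Analyzer() {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer = new HMMChineseTokenizer(); // segments text directly into words
    return new TokenStreamComponents(tokenizer);
  }
};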

@@ -207,24 +207,6 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
}
}
// LUCENE-3642
public void testInvalidOffset() throws Exception {
Analyzer analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
filters = new WordTokenFilter(filters);
return new TokenStreamComponents(tokenizer, filters);
}
};
assertAnalyzesTo(analyzer, "mosfellsbær",
new String[] { "mosfellsbaer" },
new int[] { 0 },
new int[] { 11 });
}
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
checkRandomData(random(), new SmartChineseAnalyzer(), 1000*RANDOM_MULTIPLIER);
@@ -235,16 +217,4 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
Random random = random();
checkRandomData(random, new SmartChineseAnalyzer(), 100*RANDOM_MULTIPLIER, 8192);
}
public void testEmptyTerm() throws IOException {
Random random = random();
Analyzer a = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new KeywordTokenizer();
return new TokenStreamComponents(tokenizer, new WordTokenFilter(tokenizer));
}
};
checkAnalysisConsistency(random, a, random.nextBoolean(), "");
}
}


@@ -422,18 +422,6 @@ public final class CodecUtil {
}
}
/**
* Checks that the stream is positioned at the end, and throws exception
* if it is not.
* @deprecated Use {@link #checkFooter} instead, this should only used for files without checksums
*/
@Deprecated
public static void checkEOF(IndexInput in) throws IOException {
if (in.getFilePointer() != in.length()) {
throw new CorruptIndexException("did not read all bytes from file: read " + in.getFilePointer() + " vs size " + in.length(), in);
}
}
/**
* Clones the provided input, reads all bytes from the file, and calls {@link #checkFooter}
* <p>

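checkFooter subsumes the removed end-of-file check: it validates the codec footer checksum and fails if unread bytes remain before the footer. A minimal sketch, assuming the caller reads through a ChecksumIndexInput (the Directory dir and file name are illustrative):

try (ChecksumIndexInput in = dir.openChecksumInput("_0.dat", IOContext.READONCE)) { // illustrative file
  // ... read the file contents ...
  CodecUtil.checkFooter(in); // verifies the checksum and that the whole file was consumed
}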

@@ -70,10 +70,6 @@ public class SegmentCommitInfo {
// track the fieldInfos update files
private final Set<String> fieldInfosFiles = new HashSet<>();
// Track the per-generation updates files
@Deprecated
private final Map<Long,Set<String>> genUpdatesFiles = new HashMap<>();
private volatile long sizeInBytes = -1;
/**
@@ -100,24 +96,6 @@ public class SegmentCommitInfo {
this.docValuesGen = docValuesGen;
this.nextWriteDocValuesGen = docValuesGen == -1 ? 1 : docValuesGen + 1;
}
/**
* Sets the updates file names per generation. Does not deep clone the map.
*
* @deprecated required to support 4.6-4.8 indexes.
*/
@Deprecated
public void setGenUpdatesFiles(Map<Long,Set<String>> genUpdatesFiles) {
this.genUpdatesFiles.clear();
for (Map.Entry<Long,Set<String>> kv : genUpdatesFiles.entrySet()) {
// rename the set
Set<String> set = new HashSet<>();
for (String file : kv.getValue()) {
set.add(info.namedForThisSegment(file));
}
this.genUpdatesFiles.put(kv.getKey(), set);
}
}
/** Returns the per-field DocValues updates files. */
public Map<Integer,Set<String>> getDocValuesUpdatesFiles() {
@@ -248,12 +226,6 @@ public class SegmentCommitInfo {
// Must separately add any live docs files:
info.getCodec().liveDocsFormat().files(this, files);
// Must separately add any per-gen updates files. This can go away when we
// get rid of genUpdatesFiles (6.0)
for (Set<String> updateFiles : genUpdatesFiles.values()) {
files.addAll(updateFiles);
}
// must separately add any field updates files
for (Set<String> updatefiles : dvUpdatesFiles.values()) {
@@ -377,11 +349,6 @@ public class SegmentCommitInfo {
other.nextWriteFieldInfosGen = nextWriteFieldInfosGen;
other.nextWriteDocValuesGen = nextWriteDocValuesGen;
// deep clone
for (Entry<Long,Set<String>> e : genUpdatesFiles.entrySet()) {
other.genUpdatesFiles.put(e.getKey(), new HashSet<>(e.getValue()));
}
// deep clone
for (Entry<Integer,Set<String>> e : dvUpdatesFiles.entrySet()) {
other.dvUpdatesFiles.put(e.getKey(), new HashSet<>(e.getValue()));


@@ -46,14 +46,6 @@ import java.util.Map;
* @lucene.internal */
public final class IOUtils {
/**
* UTF-8 {@link Charset} instance to prevent repeated
* {@link Charset#forName(String)} lookups
* @deprecated Use {@link StandardCharsets#UTF_8} instead.
*/
@Deprecated
public static final Charset CHARSET_UTF_8 = StandardCharsets.UTF_8;
/**
* UTF-8 charset string.
* <p>Where possible, use {@link StandardCharsets#UTF_8} instead,


@@ -19,7 +19,6 @@ package org.apache.lucene.queryparser.flexible.precedence.processors;
import org.apache.lucene.queryparser.flexible.precedence.PrecedenceQueryParser;
import org.apache.lucene.queryparser.flexible.standard.processors.BooleanQuery2ModifierNodeProcessor;
import org.apache.lucene.queryparser.flexible.standard.processors.GroupQueryNodeProcessor;
import org.apache.lucene.queryparser.flexible.standard.processors.StandardQueryNodeProcessorPipeline;
import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
@@ -29,7 +28,7 @@ import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
* boolean precedence on it.
* </p>
* <p>
* EXPERT: the precedence is enabled by removing {@link GroupQueryNodeProcessor} from the
* EXPERT: the precedence is enabled by removing {@link BooleanQuery2ModifierNodeProcessor} from the
* {@link StandardQueryNodeProcessorPipeline} and appending {@link BooleanModifiersQueryNodeProcessor}
* to the pipeline.
* </p>


@@ -366,17 +366,6 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
return getQueryConfigHandler().get(ConfigurationKeys.TIMEZONE);
}
/**
* Sets the default slop for phrases. If zero, then exact phrase matches are
* required. Default value is zero.
*
* @deprecated renamed to {@link #setPhraseSlop(int)}
*/
@Deprecated
public void setDefaultPhraseSlop(int defaultPhraseSlop) {
getQueryConfigHandler().set(ConfigurationKeys.PHRASE_SLOP, defaultPhraseSlop);
}
/**
* Sets the default slop for phrases. If zero, then exact phrase matches are
* required. Default value is zero.
@@ -510,18 +499,6 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
public DateTools.Resolution getDateResolution() {
return getQueryConfigHandler().get(ConfigurationKeys.DATE_RESOLUTION);
}
/**
* Sets the {@link Resolution} used for each field
*
* @param dateRes a collection that maps a field to its {@link Resolution}
*
* @deprecated this method was renamed to {@link #setDateResolutionMap(Map)}
*/
@Deprecated
public void setDateResolution(Map<CharSequence, DateTools.Resolution> dateRes) {
setDateResolutionMap(dateRes);
}
/**
* Returns the field to {@link Resolution} map used to normalize each date field.

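The renamed setters already exist on the class, as the query-parser test changes below show. A minimal migration sketch with illustrative slop and resolution values:

StandardQueryParser qp = new StandardQueryParser(new StandardAnalyzer());
qp.setPhraseSlop(2); // was setDefaultPhraseSlop(2)
Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<>();
dateRes.put("month", DateTools.Resolution.MONTH); // illustrative field/resolution
qp.setDateResolutionMap(dateRes); // was setDateResolution(Map)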

@@ -19,12 +19,12 @@ package org.apache.lucene.queryparser.flexible.standard.nodes;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.standard.processors.GroupQueryNodeProcessor;
import org.apache.lucene.queryparser.flexible.standard.processors.BooleanQuery2ModifierNodeProcessor;
/**
* A {@link BooleanModifierNode} has the same behaviour as
* {@link ModifierQueryNode}, it only indicates that this modifier was added by
* {@link GroupQueryNodeProcessor} and not by the user. <br/>
* {@link BooleanQuery2ModifierNodeProcessor} and not by the user. <br/>
*
* @see ModifierQueryNode
*/


@@ -1,222 +0,0 @@
package org.apache.lucene.queryparser.flexible.standard.processors;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
import org.apache.lucene.queryparser.flexible.core.nodes.AndQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.OrQueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier;
import org.apache.lucene.queryparser.flexible.core.parser.SyntaxParser;
import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;
import org.apache.lucene.queryparser.flexible.standard.nodes.BooleanModifierNode;
/**
* The {@link SyntaxParser}
* generates query node trees that consider the boolean operator precedence, but
Lucene's current syntax does not support boolean precedence, so this processor
removes all the precedence and applies the equivalent modifier according to the
boolean operation defined on a specific query node. <br/>
* <br/>
* If there is a {@link GroupQueryNode} in the query node tree, the query node
* tree is not merged with the one above it.
*
* Example: TODO: describe a good example to show how this processor works
*
* @see org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler
* @deprecated use {@link BooleanQuery2ModifierNodeProcessor} instead
*/
public class GroupQueryNodeProcessor implements QueryNodeProcessor {
private ArrayList<QueryNode> queryNodeList;
private boolean latestNodeVerified;
private QueryConfigHandler queryConfig;
private Boolean usingAnd = false;
public GroupQueryNodeProcessor() {
// empty constructor
}
@Override
public QueryNode process(QueryNode queryTree) throws QueryNodeException {
Operator defaultOperator = getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR);
if (defaultOperator == null) {
throw new IllegalArgumentException(
"DEFAULT_OPERATOR should be set on the QueryConfigHandler");
}
this.usingAnd = StandardQueryConfigHandler.Operator.AND == defaultOperator;
if (queryTree instanceof GroupQueryNode) {
queryTree = ((GroupQueryNode) queryTree).getChild();
}
this.queryNodeList = new ArrayList<>();
this.latestNodeVerified = false;
readTree(queryTree);
List<QueryNode> actualQueryNodeList = this.queryNodeList;
for (int i = 0; i < actualQueryNodeList.size(); i++) {
QueryNode node = actualQueryNodeList.get(i);
if (node instanceof GroupQueryNode) {
actualQueryNodeList.set(i, process(node));
}
}
this.usingAnd = false;
if (queryTree instanceof BooleanQueryNode) {
queryTree.set(actualQueryNodeList);
return queryTree;
} else {
return new BooleanQueryNode(actualQueryNodeList);
}
}
/**
*/
private QueryNode applyModifier(QueryNode node, QueryNode parent) {
if (this.usingAnd) {
if (parent instanceof OrQueryNode) {
if (node instanceof ModifierQueryNode) {
ModifierQueryNode modNode = (ModifierQueryNode) node;
if (modNode.getModifier() == Modifier.MOD_REQ) {
return modNode.getChild();
}
}
} else {
if (node instanceof ModifierQueryNode) {
ModifierQueryNode modNode = (ModifierQueryNode) node;
if (modNode.getModifier() == Modifier.MOD_NONE) {
return new BooleanModifierNode(modNode.getChild(), Modifier.MOD_REQ);
}
} else {
return new BooleanModifierNode(node, Modifier.MOD_REQ);
}
}
} else {
if (node.getParent() instanceof AndQueryNode) {
if (node instanceof ModifierQueryNode) {
ModifierQueryNode modNode = (ModifierQueryNode) node;
if (modNode.getModifier() == Modifier.MOD_NONE) {
return new BooleanModifierNode(modNode.getChild(), Modifier.MOD_REQ);
}
} else {
return new BooleanModifierNode(node, Modifier.MOD_REQ);
}
}
}
return node;
}
private void readTree(QueryNode node) {
if (node instanceof BooleanQueryNode) {
List<QueryNode> children = node.getChildren();
if (children != null && children.size() > 0) {
for (int i = 0; i < children.size() - 1; i++) {
readTree(children.get(i));
}
processNode(node);
readTree(children.get(children.size() - 1));
} else {
processNode(node);
}
} else {
processNode(node);
}
}
private void processNode(QueryNode node) {
if (node instanceof AndQueryNode || node instanceof OrQueryNode) {
if (!this.latestNodeVerified && !this.queryNodeList.isEmpty()) {
this.queryNodeList.add(applyModifier(this.queryNodeList
.remove(this.queryNodeList.size() - 1), node));
this.latestNodeVerified = true;
}
} else if (!(node instanceof BooleanQueryNode)) {
this.queryNodeList.add(applyModifier(node, node.getParent()));
this.latestNodeVerified = false;
}
}
@Override
public QueryConfigHandler getQueryConfigHandler() {
return this.queryConfig;
}
@Override
public void setQueryConfigHandler(QueryConfigHandler queryConfigHandler) {
this.queryConfig = queryConfigHandler;
}
}


@@ -434,14 +434,14 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<>();
// set a field specific date resolution
fieldMap.put(monthField, DateTools.Resolution.MONTH);
qp.setDateResolution(fieldMap);
qp.setDateResolutionMap(fieldMap);
// set default date resolution to MILLISECOND
qp.setDateResolution(DateTools.Resolution.MILLISECOND);
// set second field specific date resolution
fieldMap.put(hourField, DateTools.Resolution.HOUR);
qp.setDateResolution(fieldMap);
qp.setDateResolutionMap(fieldMap);
// for this field no field specific date resolution has been set,
// so verify if the default resolution is used


@@ -93,12 +93,12 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
.toString());
// phrase after changing default slop
qp.setDefaultPhraseSlop(99);
qp.setPhraseSlop(99);
assertEquals("\"(multi multi2) foo\"~99 bar", qp.parse("\"multi foo\" bar",
"").toString());
assertEquals("\"(multi multi2) foo\"~99 \"foo bar\"~2", qp.parse(
"\"multi foo\" \"foo bar\"~2", "").toString());
qp.setDefaultPhraseSlop(0);
qp.setPhraseSlop(0);
// non-default operator:
qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);


@@ -744,14 +744,14 @@ public class TestQPHelper extends LuceneTestCase {
// set a field specific date resolution
dateRes.put(monthField, DateTools.Resolution.MONTH);
qp.setDateResolution(dateRes);
qp.setDateResolutionMap(dateRes);
// set default date resolution to MILLISECOND
qp.setDateResolution(DateTools.Resolution.MILLISECOND);
// set second field specific date resolution
dateRes.put(hourField, DateTools.Resolution.HOUR);
qp.setDateResolution(dateRes);
qp.setDateResolutionMap(dateRes);
// for this field no field specific date resolution has been set,
// so verify if the default resolution is used


@@ -82,7 +82,6 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.Version;
// TODO:
// - a PostingsFormat that stores super-high-freq terms as
@@ -125,7 +124,6 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
protected final Analyzer queryAnalyzer;
/** Analyzer used at index time */
protected final Analyzer indexAnalyzer;
final Version matchVersion;
private final Directory dir;
final int minPrefixChars;
private final boolean commitOnBuild;
@@ -152,15 +150,6 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
this(dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS, false);
}
/**
* @deprecated Use {@link #AnalyzingInfixSuggester(Directory, Analyzer)}
*/
@Deprecated
public AnalyzingInfixSuggester(Version matchVersion, Directory dir, Analyzer analyzer) throws IOException {
this(matchVersion, dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS, false);
}
/** Create a new instance, loading from a previously built
* AnalyzingInfixSuggester directory, if it exists. This directory must be
* private to the infix suggester (i.e., not an external
@@ -178,15 +167,6 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
*/
public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars,
boolean commitOnBuild) throws IOException {
this(indexAnalyzer.getVersion(), dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild);
}
/**
* @deprecated Use {@link #AnalyzingInfixSuggester(Directory, Analyzer, Analyzer, int, boolean)}
*/
@Deprecated
public AnalyzingInfixSuggester(Version matchVersion, Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars,
boolean commitOnBuild) throws IOException {
if (minPrefixChars < 0) {
throw new IllegalArgumentException("minPrefixChars must be >= 0; got: " + minPrefixChars);
@@ -194,7 +174,6 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
this.queryAnalyzer = queryAnalyzer;
this.indexAnalyzer = indexAnalyzer;
this.matchVersion = matchVersion;
this.dir = dir;
this.minPrefixChars = minPrefixChars;
this.commitOnBuild = commitOnBuild;

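All suggester constructors now omit the Version argument; a minimal sketch with an illustrative index path and analyzer:

AnalyzingInfixSuggester suggester =
    new AnalyzingInfixSuggester(FSDirectory.open(Paths.get("suggest-index")), new StandardAnalyzer()); // illustrative path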

@@ -41,7 +41,6 @@ import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.suggest.Lookup;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
// TODO:
// - allow to use the search score
@@ -97,15 +96,7 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
* directory, if it exists.
*/
public BlendedInfixSuggester(Directory dir, Analyzer analyzer) throws IOException {
this(analyzer.getVersion(), dir, analyzer);
}
/**
* @deprecated Use {@link #BlendedInfixSuggester(Directory, Analyzer)}
*/
@Deprecated
public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer analyzer) throws IOException {
super(matchVersion, dir, analyzer);
super(dir, analyzer);
this.blenderType = BlenderType.POSITION_LINEAR;
this.numFactor = DEFAULT_NUM_FACTOR;
}
@@ -122,16 +113,7 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
*/
public BlendedInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer,
int minPrefixChars, BlenderType blenderType, int numFactor, boolean commitOnBuild) throws IOException {
this(indexAnalyzer.getVersion(), dir, indexAnalyzer, queryAnalyzer, minPrefixChars, blenderType, numFactor, commitOnBuild);
}
/**
* @deprecated Use {@link #BlendedInfixSuggester(Directory, Analyzer, Analyzer, int, BlendedInfixSuggester.BlenderType, int, boolean)}
*/
@Deprecated
public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer,
int minPrefixChars, BlenderType blenderType, int numFactor, boolean commitOnBuild) throws IOException {
super(matchVersion, dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild);
super(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild);
this.blenderType = blenderType;
this.numFactor = numFactor;
}


@@ -187,7 +187,7 @@ public class ICUCollationField extends FieldType {
rbc.setVariableTop(variableTop);
}
analyzer = new ICUCollationKeyAnalyzer(Version.LATEST, collator);
analyzer = new ICUCollationKeyAnalyzer(collator);
}
/**


@@ -96,8 +96,7 @@ public class AnalyzingInfixLookupFactory extends LookupFactory {
: AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS;
try {
return new AnalyzingInfixSuggester(core.getSolrConfig().luceneMatchVersion,
FSDirectory.open(new File(indexPath).toPath()), indexAnalyzer,
return new AnalyzingInfixSuggester(FSDirectory.open(new File(indexPath).toPath()), indexAnalyzer,
queryAnalyzer, minPrefixChars, true) {
@Override
public List<LookupResult> lookup(CharSequence key, Set<BytesRef> contexts, int num, boolean allTermsRequired, boolean doHighlight) throws IOException {


@@ -101,8 +101,7 @@ public class BlendedInfixLookupFactory extends AnalyzingInfixLookupFactory {
: BlendedInfixSuggester.DEFAULT_NUM_FACTOR;
try {
return new BlendedInfixSuggester(core.getSolrConfig().luceneMatchVersion,
FSDirectory.open(new File(indexPath).toPath()),
return new BlendedInfixSuggester(FSDirectory.open(new File(indexPath).toPath()),
indexAnalyzer, queryAnalyzer, minPrefixChars,
blenderType, numFactor, true) {
@Override