diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java
index e4bd5fa97ce..77275b7dddd 100644
--- a/src/main/java/org/elasticsearch/Version.java
+++ b/src/main/java/org/elasticsearch/Version.java
@@ -31,6 +31,7 @@ import java.io.Serializable;
 
 /**
  */
+@SuppressWarnings("deprecation")
 public class Version implements Serializable {
 
     // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index 41c2841a30f..139a2d58091 100644
--- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -21,7 +21,10 @@ package org.elasticsearch.common.lucene;
 
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.index.*;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.search.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -52,6 +55,7 @@ public class Lucene {
 
     public static ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
 
+    @SuppressWarnings("deprecation")
    public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
         if (version == null) {
             return defaultVersion;
@@ -359,7 +363,7 @@ public class Lucene {
 
     private Lucene() {
     }
-    
+
     public static final boolean indexExists(final Directory directory) throws IOException {
         return DirectoryReader.indexExists(directory);
     }
diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
index d900b0fbd9e..944d3581669 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java
@@ -64,32 +64,32 @@ public class JsonXContent implements XContent {
 
     @Override
     public XContentGenerator createGenerator(OutputStream os) throws IOException {
-        return new JsonXContentGenerator(jsonFactory.createJsonGenerator(os, JsonEncoding.UTF8));
+        return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
     }
 
     @Override
     public XContentGenerator createGenerator(Writer writer) throws IOException {
-        return new JsonXContentGenerator(jsonFactory.createJsonGenerator(writer));
+        return new JsonXContentGenerator(jsonFactory.createGenerator(writer));
     }
 
     @Override
     public XContentParser createParser(String content) throws IOException {
-        return new JsonXContentParser(jsonFactory.createJsonParser(new FastStringReader(content)));
+        return new JsonXContentParser(jsonFactory.createParser(new FastStringReader(content)));
     }
 
     @Override
     public XContentParser createParser(InputStream is) throws IOException {
-        return new JsonXContentParser(jsonFactory.createJsonParser(is));
+        return new JsonXContentParser(jsonFactory.createParser(is));
     }
 
     @Override
     public XContentParser createParser(byte[] data) throws IOException {
-        return new JsonXContentParser(jsonFactory.createJsonParser(data));
+        return new JsonXContentParser(jsonFactory.createParser(data));
     }
 
     @Override
     public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
-        return new JsonXContentParser(jsonFactory.createJsonParser(data, offset, length));
+        return new JsonXContentParser(jsonFactory.createParser(data, offset, length));
     }
 
     @Override
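
Note: Jackson 2.x keeps createJsonParser/createJsonGenerator only as deprecated aliases; createParser/createGenerator are the supported entry points, which is what the XContent changes in this patch switch to. A minimal standalone sketch of the renamed calls (illustrative only, not ES code):

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;

    import java.io.StringWriter;

    public class JacksonFactorySketch {
        public static void main(String[] args) throws Exception {
            JsonFactory factory = new JsonFactory();

            // Jackson 2.x: createGenerator(...) replaces the deprecated createJsonGenerator(...)
            StringWriter writer = new StringWriter();
            JsonGenerator gen = factory.createGenerator(writer);
            gen.writeStartObject();
            gen.writeStringField("index", "test");
            gen.writeEndObject();
            gen.close();

            // Jackson 2.x: createParser(...) replaces the deprecated createJsonParser(...)
            JsonParser parser = factory.createParser(writer.toString());
            while (parser.nextToken() != JsonToken.END_OBJECT) {
                // walk the token stream
            }
            parser.close();
        }
    }
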
diff --git a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
index c9a3c1437dc..96dd326c424 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java
@@ -62,32 +62,32 @@ public class SmileXContent implements XContent {
 
     @Override
     public XContentGenerator createGenerator(OutputStream os) throws IOException {
-        return new SmileXContentGenerator(smileFactory.createJsonGenerator(os, JsonEncoding.UTF8));
+        return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
     }
 
     @Override
     public XContentGenerator createGenerator(Writer writer) throws IOException {
-        return new SmileXContentGenerator(smileFactory.createJsonGenerator(writer));
+        return new SmileXContentGenerator(smileFactory.createGenerator(writer));
     }
 
     @Override
     public XContentParser createParser(String content) throws IOException {
-        return new SmileXContentParser(smileFactory.createJsonParser(new FastStringReader(content)));
+        return new SmileXContentParser(smileFactory.createParser(new FastStringReader(content)));
     }
 
     @Override
     public XContentParser createParser(InputStream is) throws IOException {
-        return new SmileXContentParser(smileFactory.createJsonParser(is));
+        return new SmileXContentParser(smileFactory.createParser(is));
     }
 
     @Override
     public XContentParser createParser(byte[] data) throws IOException {
-        return new SmileXContentParser(smileFactory.createJsonParser(data));
+        return new SmileXContentParser(smileFactory.createParser(data));
     }
 
     @Override
     public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
-        return new SmileXContentParser(smileFactory.createJsonParser(data, offset, length));
+        return new SmileXContentParser(smileFactory.createParser(data, offset, length));
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
index 93ed86bdf20..4f639ea9046 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java
@@ -46,7 +46,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        SmileParser parser = SmileXContent.smileFactory.createJsonParser(content);
+        SmileParser parser = SmileXContent.smileFactory.createParser(content);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
@@ -58,7 +58,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        SmileParser parser = SmileXContent.smileFactory.createJsonParser(content);
+        SmileParser parser = SmileXContent.smileFactory.createParser(content);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
@@ -72,9 +72,9 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
         writeFieldName(fieldName);
         SmileParser parser;
         if (content.hasArray()) {
-            parser = SmileXContent.smileFactory.createJsonParser(content.array(), content.arrayOffset(), content.length());
+            parser = SmileXContent.smileFactory.createParser(content.array(), content.arrayOffset(), content.length());
         } else {
-            parser = SmileXContent.smileFactory.createJsonParser(content.streamInput());
+            parser = SmileXContent.smileFactory.createParser(content.streamInput());
         }
         try {
             parser.nextToken();
@@ -87,7 +87,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        SmileParser parser = SmileXContent.smileFactory.createJsonParser(content, offset, length);
+        SmileParser parser = SmileXContent.smileFactory.createParser(content, offset, length);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
diff --git a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
index 3cbb9b6d411..636d977a6a0 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java
@@ -60,32 +60,32 @@ public class YamlXContent implements XContent {
 
     @Override
     public XContentGenerator createGenerator(OutputStream os) throws IOException {
-        return new YamlXContentGenerator(yamlFactory.createJsonGenerator(os, JsonEncoding.UTF8));
+        return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
     }
 
     @Override
     public XContentGenerator createGenerator(Writer writer) throws IOException {
-        return new YamlXContentGenerator(yamlFactory.createJsonGenerator(writer));
+        return new YamlXContentGenerator(yamlFactory.createGenerator(writer));
     }
 
     @Override
     public XContentParser createParser(String content) throws IOException {
-        return new YamlXContentParser(yamlFactory.createJsonParser(new FastStringReader(content)));
+        return new YamlXContentParser(yamlFactory.createParser(new FastStringReader(content)));
     }
 
     @Override
     public XContentParser createParser(InputStream is) throws IOException {
-        return new YamlXContentParser(yamlFactory.createJsonParser(is));
+        return new YamlXContentParser(yamlFactory.createParser(is));
     }
 
     @Override
     public XContentParser createParser(byte[] data) throws IOException {
-        return new YamlXContentParser(yamlFactory.createJsonParser(data));
+        return new YamlXContentParser(yamlFactory.createParser(data));
     }
 
     @Override
     public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
-        return new YamlXContentParser(yamlFactory.createJsonParser(data, offset, length));
+        return new YamlXContentParser(yamlFactory.createParser(data, offset, length));
     }
 
     @Override
@@ -98,6 +98,6 @@ public class YamlXContent implements XContent {
 
     @Override
     public XContentParser createParser(Reader reader) throws IOException {
-        return new YamlXContentParser(yamlFactory.createJsonParser(reader));
+        return new YamlXContentParser(yamlFactory.createParser(reader));
     }
 }
diff --git a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
index e084b0769c9..8926d36f9c5 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java
@@ -46,7 +46,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content);
+        YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
@@ -58,7 +58,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content);
+        YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
@@ -72,9 +72,9 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
         writeFieldName(fieldName);
         YAMLParser parser;
         if (content.hasArray()) {
-            parser = YamlXContent.yamlFactory.createJsonParser(content.array(), content.arrayOffset(), content.length());
+            parser = YamlXContent.yamlFactory.createParser(content.array(), content.arrayOffset(), content.length());
         } else {
-            parser = YamlXContent.yamlFactory.createJsonParser(content.streamInput());
+            parser = YamlXContent.yamlFactory.createParser(content.streamInput());
         }
         try {
             parser.nextToken();
@@ -87,7 +87,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
     @Override
     public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
         writeFieldName(fieldName);
-        YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content, offset, length);
+        YAMLParser parser = YamlXContent.yamlFactory.createParser(content, offset, length);
         try {
             parser.nextToken();
             generator.copyCurrentStructure(parser);
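
All three XContent generators above funnel writeRawField through the same pattern: open a parser over the raw content, advance to the first token, and copy the structure into the target generator. A small self-contained Jackson 2.x sketch of that pattern (class and field names here are illustrative, not ES code):

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.core.JsonParser;

    import java.io.ByteArrayOutputStream;
    import java.nio.charset.StandardCharsets;

    public class RawFieldCopySketch {
        public static void main(String[] args) throws Exception {
            JsonFactory factory = new JsonFactory();
            byte[] rawSource = "{\"user\":\"test\"}".getBytes(StandardCharsets.UTF_8);

            ByteArrayOutputStream out = new ByteArrayOutputStream();
            JsonGenerator generator = factory.createGenerator(out);
            generator.writeStartObject();
            generator.writeFieldName("_source");

            // Same idea as the generators above: stream the raw bytes straight
            // into the output without materializing an intermediate map.
            JsonParser parser = factory.createParser(rawSource);
            try {
                parser.nextToken();
                generator.copyCurrentStructure(parser);
            } finally {
                parser.close();
            }

            generator.writeEndObject();
            generator.close();
            System.out.println(out.toString("UTF-8")); // {"_source":{"user":"test"}}
        }
    }
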
diff --git a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java
index d0b16d554d4..7386e702d7d 100644
--- a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java
+++ b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java
@@ -19,14 +19,12 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
-
-import org.elasticsearch.ElasticSearchIllegalArgumentException;
-
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.apache.lucene.util.Version;
+import org.elasticsearch.ElasticSearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
@@ -40,6 +38,7 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC
 /**
  *
  */
+@SuppressWarnings("deprecation")
 public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
 
     private final int minGram;
diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java
index fa860b8865a..2025735b757 100644
--- a/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java
+++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenFilterFactory.java
@@ -46,6 +46,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
         this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
     }
 
+    @SuppressWarnings("deprecation")
     @Override
     public TokenStream create(TokenStream tokenStream) {
         final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // we supported it since 4.3
diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java
index 0572736dd3b..f5f31038bd5 100644
--- a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java
+++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java
@@ -19,10 +19,9 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
-
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.ElasticSearchIllegalArgumentException;
@@ -51,11 +50,11 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
 
     static {
         ImmutableMap.Builder builder = ImmutableMap.builder();
-        builder.put("letter",      CharMatcher.Basic.LETTER);
-        builder.put("digit",       CharMatcher.Basic.DIGIT);
-        builder.put("whitespace",  CharMatcher.Basic.WHITESPACE);
+        builder.put("letter", CharMatcher.Basic.LETTER);
+        builder.put("digit", CharMatcher.Basic.DIGIT);
+        builder.put("whitespace", CharMatcher.Basic.WHITESPACE);
         builder.put("punctuation", CharMatcher.Basic.PUNCTUATION);
-        builder.put("symbol",      CharMatcher.Basic.SYMBOL);
+        builder.put("symbol", CharMatcher.Basic.SYMBOL);
         // Populate with unicode categories from java.lang.Character
         for (Field field : Character.class.getFields()) {
             if (!field.getName().startsWith("DIRECTIONALITY")
@@ -97,6 +96,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
         this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
     }
 
+    @SuppressWarnings("deprecation")
     @Override
     public Tokenizer create(Reader reader) {
         final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // we supported it since 4.3
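
The n-gram factories above pick between the back-compat Lucene43(Edge)NGramTokenizer classes and the reworked 4.4 tokenizers depending on the index's Lucene version; the ES code additionally maps LUCENE_43 to LUCENE_44 because the new behaviour was already supported on 4.3. A simplified sketch of that gating, with constructor signatures taken from the Lucene 4.4 javadocs and to be treated as assumptions rather than the exact factory logic:

    import java.io.Reader;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
    import org.apache.lucene.analysis.ngram.NGramTokenizer;
    import org.apache.lucene.util.Version;

    public class NGramVersionGatingSketch {

        // Indices on 4.4 or later get the reworked NGramTokenizer; older ones keep
        // the deprecated back-compat tokenizer so existing behaviour is preserved.
        static Tokenizer create(Version version, Reader reader, int minGram, int maxGram) {
            if (version.onOrAfter(Version.LUCENE_44)) {
                return new NGramTokenizer(version, reader, minGram, maxGram);
            }
            return new Lucene43NGramTokenizer(reader, minGram, maxGram);
        }

        public static void main(String[] args) {
            Tokenizer tokenizer = create(Version.LUCENE_44, new StringReader("elastic"), 1, 2);
            System.out.println(tokenizer.getClass().getSimpleName()); // NGramTokenizer
        }
    }
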
diff --git a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
index a24934d6e04..a644a49b905 100644
--- a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
+++ b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java
@@ -36,6 +36,7 @@ import java.util.Set;
 /**
  *
  */
+@SuppressWarnings("deprecation")
 public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
 
     private final CharArraySet stopWords;
@@ -49,7 +50,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
         super(index, indexSettings, name, settings);
         this.ignoreCase = settings.getAsBoolean("ignore_case", false);
         this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version, ignoreCase);
-        // LUCENE 4 UPGRADE: LUCENE_29 constant is no longer defined
         this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", version.onOrAfter(Version.LUCENE_30));
     }
 
diff --git a/src/test/java/org/elasticsearch/test/unit/common/lucene/all/SimpleAllTests.java b/src/test/java/org/elasticsearch/test/unit/common/lucene/all/SimpleAllTests.java
index 0b393bda028..f94f697debd 100644
--- a/src/test/java/org/elasticsearch/test/unit/common/lucene/all/SimpleAllTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/common/lucene/all/SimpleAllTests.java
@@ -22,10 +22,8 @@ package org.elasticsearch.test.unit.common.lucene.all;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StoredField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
@@ -83,7 +81,7 @@ public class SimpleAllTests {
         allEntries.addText("field1", "something", 1.0f);
         allEntries.addText("field2", "else", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
@@ -93,11 +91,11 @@ public class SimpleAllTests {
         allEntries.addText("field1", "else", 1.0f);
         allEntries.addText("field2", "something", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
@@ -124,7 +122,7 @@ public class SimpleAllTests {
         allEntries.addText("field1", "something", 1.0f);
         allEntries.addText("field2", "else", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
@@ -134,11 +132,11 @@ public class SimpleAllTests {
         allEntries.addText("field1", "else", 2.0f);
         allEntries.addText("field2", "something", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         // this one is boosted. so the second doc is more relevant
@@ -166,7 +164,7 @@ public class SimpleAllTests {
         allEntries.addText("field1", "something moo", 1.0f);
         allEntries.addText("field2", "else koo", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
@@ -176,11 +174,11 @@ public class SimpleAllTests {
         allEntries.addText("field1", "else koo", 1.0f);
         allEntries.addText("field2", "something moo", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
@@ -217,7 +215,7 @@ public class SimpleAllTests {
         allEntries.addText("field1", "something moo", 1.0f);
         allEntries.addText("field2", "else koo", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
@@ -227,11 +225,11 @@ public class SimpleAllTests {
         allEntries.addText("field1", "else koo", 2.0f);
         allEntries.addText("field2", "something moo", 1.0f);
         allEntries.reset();
-        doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
 
         indexWriter.addDocument(doc);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
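
The test changes in this and the following files follow one recipe: analyzed fields move from the removed Field constructors to TextField, and near-real-time readers are obtained via DirectoryReader.open(IndexWriter, boolean) instead of the removed IndexReader.open overload. A minimal standalone Lucene 4.4 sketch of that recipe (hypothetical class, not from the ES tree):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class NrtReaderSketch {
        public static void main(String[] args) throws Exception {
            RAMDirectory dir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(dir,
                    new IndexWriterConfig(Version.LUCENE_44, new StandardAnalyzer(Version.LUCENE_44)));

            Document doc = new Document();
            // TextField replaces the removed Field(String, ...) constructors for analyzed text.
            doc.add(new TextField("content", "the quick brown fox", Field.Store.YES));
            writer.addDocument(doc);

            // DirectoryReader.open(IndexWriter, applyAllDeletes) replaces IndexReader.open(IndexWriter, boolean).
            IndexReader reader = DirectoryReader.open(writer, true);
            IndexSearcher searcher = new IndexSearcher(reader);
            TopDocs docs = searcher.search(new TermQuery(new Term("content", "fox")), 10);
            System.out.println("hits: " + docs.totalHits);

            reader.close();
            writer.close();
            dir.close();
        }
    }
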
diff --git a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MatchAllDocsFilterTests.java b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MatchAllDocsFilterTests.java
index 44d7aeae96d..7281e3d0230 100644
--- a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MatchAllDocsFilterTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MatchAllDocsFilterTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -56,7 +57,7 @@ public class MatchAllDocsFilterTests {
         document.add(new TextField("text", "lucene release", Field.Store.YES));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         XConstantScoreQuery query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
diff --git a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MoreLikeThisQueryTests.java b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MoreLikeThisQueryTests.java
index 371bfe13877..41233bc15d4 100644
--- a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MoreLikeThisQueryTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MoreLikeThisQueryTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -57,7 +58,7 @@ public class MoreLikeThisQueryTests {
         document.add(new TextField("text", "lucene release", Field.Store.YES));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);
diff --git a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MultiPhrasePrefixQueryTests.java b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MultiPhrasePrefixQueryTests.java
index f167fcb3571..e38c7d24691 100644
--- a/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MultiPhrasePrefixQueryTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/common/lucene/search/MultiPhrasePrefixQueryTests.java
@@ -22,10 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.RAMDirectory;
 import org.elasticsearch.common.lucene.Lucene;
@@ -43,7 +40,7 @@ public class MultiPhrasePrefixQueryTests {
         Document doc = new Document();
         doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
         writer.addDocument(doc);
-        IndexReader reader = IndexReader.open(writer, true);
+        IndexReader reader = DirectoryReader.open(writer, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
diff --git a/src/test/java/org/elasticsearch/test/unit/deps/jackson/JacksonLocationTests.java b/src/test/java/org/elasticsearch/test/unit/deps/jackson/JacksonLocationTests.java
index cf6b2a753f8..461a4d7c493 100644
--- a/src/test/java/org/elasticsearch/test/unit/deps/jackson/JacksonLocationTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/deps/jackson/JacksonLocationTests.java
@@ -45,7 +45,7 @@ public class JacksonLocationTests {
         //        }
         //    }
         BytesStreamOutput os = new BytesStreamOutput();
-        JsonGenerator gen = new JsonFactory().createJsonGenerator(os);
+        JsonGenerator gen = new JsonFactory().createGenerator(os);
         gen.writeStartObject();
 
         gen.writeStringField("index", "test");
@@ -60,7 +60,7 @@ public class JacksonLocationTests {
         gen.close();
 
         byte[] data = os.bytes().toBytes();
-        JsonParser parser = new JsonFactory().createJsonParser(data);
+        JsonParser parser = new JsonFactory().createParser(data);
 
         assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
         assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "index"
diff --git a/src/test/java/org/elasticsearch/test/unit/deps/lucene/VectorHighlighterTests.java b/src/test/java/org/elasticsearch/test/unit/deps/lucene/VectorHighlighterTests.java
index f4e56628fb5..dff3ae5776d 100644
--- a/src/test/java/org/elasticsearch/test/unit/deps/lucene/VectorHighlighterTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/deps/lucene/VectorHighlighterTests.java
@@ -22,10 +22,7 @@ package org.elasticsearch.test.unit.deps.lucene;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
 import org.apache.lucene.search.vectorhighlight.XFastVectorHighlighter;
@@ -52,7 +49,7 @@ public class VectorHighlighterTests {
         document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@@ -75,7 +72,7 @@ public class VectorHighlighterTests {
         document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@@ -113,7 +110,7 @@ public class VectorHighlighterTests {
         document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@@ -135,7 +132,7 @@ public class VectorHighlighterTests {
         document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
         indexWriter.addDocument(document);
 
-        IndexReader reader = IndexReader.open(indexWriter, true);
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
diff --git a/src/test/java/org/elasticsearch/test/unit/index/cache/filter/FilterCacheTests.java b/src/test/java/org/elasticsearch/test/unit/index/cache/filter/FilterCacheTests.java
index c41496528b8..d11ba36ff90 100644
--- a/src/test/java/org/elasticsearch/test/unit/index/cache/filter/FilterCacheTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/index/cache/filter/FilterCacheTests.java
@@ -58,7 +58,7 @@ public class FilterCacheTests {
     private void verifyCache(FilterCache filterCache) throws Exception {
         Directory dir = new RAMDirectory();
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
-        DirectoryReader reader = IndexReader.open(indexWriter, true);
+        DirectoryReader reader = DirectoryReader.open(indexWriter, true);
 
         for (int i = 0; i < 100; i++) {
             Document document = new Document();
diff --git a/src/test/java/org/elasticsearch/test/unit/index/deletionpolicy/SnapshotDeletionPolicyTests.java b/src/test/java/org/elasticsearch/test/unit/index/deletionpolicy/SnapshotDeletionPolicyTests.java
index 3ad38984779..136c06d62cf 100644
--- a/src/test/java/org/elasticsearch/test/unit/index/deletionpolicy/SnapshotDeletionPolicyTests.java
+++ b/src/test/java/org/elasticsearch/test/unit/index/deletionpolicy/SnapshotDeletionPolicyTests.java
@@ -25,7 +25,6 @@ import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
@@ -44,8 +43,6 @@ import static org.hamcrest.Matchers.equalTo;
 
 /**
  * A set of tests for {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy}.
- *
- *
 */
 public class SnapshotDeletionPolicyTests {
 
@@ -60,7 +57,7 @@ public class SnapshotDeletionPolicyTests {
         dir = new RAMDirectory();
         deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
         // LUCENE 4 UPGRADE: Not sure about version.
-        indexWriter = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_31, Lucene.STANDARD_ANALYZER)
+        indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER)
                 .setIndexDeletionPolicy(deletionPolicy)
                 .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
     }
@@ -70,7 +67,7 @@ public class SnapshotDeletionPolicyTests {
         indexWriter.close();
         dir.close();
     }
-    
+
     private Document testDocument() {
         Document document = new Document();
         document.add(new TextField("test", "1", Field.Store.YES));
diff --git a/src/test/java/org/elasticsearch/test/unit/index/mapper/lucene/DoubleIndexingDocTest.java b/src/test/java/org/elasticsearch/test/unit/index/mapper/lucene/DoubleIndexingDocTest.java
index e4bbf5a9880..8d4e891199f 100644
--- a/src/test/java/org/elasticsearch/test/unit/index/mapper/lucene/DoubleIndexingDocTest.java
+++ b/src/test/java/org/elasticsearch/test/unit/index/mapper/lucene/DoubleIndexingDocTest.java
@@ -1,5 +1,6 @@
 package org.elasticsearch.test.unit.index.mapper.lucene;
 
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -43,7 +44,7 @@ public class DoubleIndexingDocTest {
         writer.addDocument(doc.rootDoc(), doc.analyzer());
         writer.addDocument(doc.rootDoc(), doc.analyzer());
 
-        IndexReader reader = IndexReader.open(writer, true);
+        IndexReader reader = DirectoryReader.open(writer, true);
         IndexSearcher searcher = new IndexSearcher(reader);
 
         TopDocs topDocs = searcher.search(mapper.mappers().smartName("field1").mapper().termQuery("value1", null), 10);
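
SnapshotDeletionPolicyTests now derives its IndexWriterConfig from the central Lucene.VERSION constant instead of a stale hardcoded Version.LUCENE_31. A rough standalone sketch of the wiring, using plain Lucene classes in place of the Elasticsearch deletion-policy wrappers (illustrative only):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
    import org.apache.lucene.index.SnapshotDeletionPolicy;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class WriterConfigSketch {
        public static void main(String[] args) throws Exception {
            // Stand-in for Lucene.VERSION: one constant drives the config instead of a hardcoded value.
            Version version = Version.LUCENE_44;

            // Plain Lucene deletion policies here; the test wires in the ES
            // SnapshotDeletionPolicy/KeepOnlyLastDeletionPolicy classes instead.
            SnapshotDeletionPolicy deletionPolicy =
                    new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());

            RAMDirectory dir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(dir,
                    new IndexWriterConfig(version, new StandardAnalyzer(version))
                            .setIndexDeletionPolicy(deletionPolicy)
                            .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
            writer.close();
            dir.close();
        }
    }
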