Clean up deprecations / warnings

This commit is contained in:
Shay Banon 2013-07-29 01:55:41 +02:00
parent 4e66658aac
commit 2c2cc844dc
20 changed files with 79 additions and 82 deletions

View File

@ -31,6 +31,7 @@ import java.io.Serializable;
/**
*/
@SuppressWarnings("deprecation")
public class Version implements Serializable {
// The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator

View File

@ -21,7 +21,10 @@ package org.elasticsearch.common.lucene;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.*;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@ -52,6 +55,7 @@ public class Lucene {
public static ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
@SuppressWarnings("deprecation")
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
if (version == null) {
return defaultVersion;
@ -359,7 +363,7 @@ public class Lucene {
private Lucene() {
}
public static final boolean indexExists(final Directory directory) throws IOException {
return DirectoryReader.indexExists(directory);
}

View File

@ -64,32 +64,32 @@ public class JsonXContent implements XContent {
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new JsonXContentGenerator(jsonFactory.createJsonGenerator(os, JsonEncoding.UTF8));
return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return new JsonXContentGenerator(jsonFactory.createJsonGenerator(writer));
return new JsonXContentGenerator(jsonFactory.createGenerator(writer));
}
@Override
public XContentParser createParser(String content) throws IOException {
return new JsonXContentParser(jsonFactory.createJsonParser(new FastStringReader(content)));
return new JsonXContentParser(jsonFactory.createParser(new FastStringReader(content)));
}
@Override
public XContentParser createParser(InputStream is) throws IOException {
return new JsonXContentParser(jsonFactory.createJsonParser(is));
return new JsonXContentParser(jsonFactory.createParser(is));
}
@Override
public XContentParser createParser(byte[] data) throws IOException {
return new JsonXContentParser(jsonFactory.createJsonParser(data));
return new JsonXContentParser(jsonFactory.createParser(data));
}
@Override
public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
return new JsonXContentParser(jsonFactory.createJsonParser(data, offset, length));
return new JsonXContentParser(jsonFactory.createParser(data, offset, length));
}
@Override

View File

@ -62,32 +62,32 @@ public class SmileXContent implements XContent {
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new SmileXContentGenerator(smileFactory.createJsonGenerator(os, JsonEncoding.UTF8));
return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return new SmileXContentGenerator(smileFactory.createJsonGenerator(writer));
return new SmileXContentGenerator(smileFactory.createGenerator(writer));
}
@Override
public XContentParser createParser(String content) throws IOException {
return new SmileXContentParser(smileFactory.createJsonParser(new FastStringReader(content)));
return new SmileXContentParser(smileFactory.createParser(new FastStringReader(content)));
}
@Override
public XContentParser createParser(InputStream is) throws IOException {
return new SmileXContentParser(smileFactory.createJsonParser(is));
return new SmileXContentParser(smileFactory.createParser(is));
}
@Override
public XContentParser createParser(byte[] data) throws IOException {
return new SmileXContentParser(smileFactory.createJsonParser(data));
return new SmileXContentParser(smileFactory.createParser(data));
}
@Override
public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
return new SmileXContentParser(smileFactory.createJsonParser(data, offset, length));
return new SmileXContentParser(smileFactory.createParser(data, offset, length));
}
@Override

View File

@ -46,7 +46,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
writeFieldName(fieldName);
SmileParser parser = SmileXContent.smileFactory.createJsonParser(content);
SmileParser parser = SmileXContent.smileFactory.createParser(content);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);
@ -58,7 +58,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
writeFieldName(fieldName);
SmileParser parser = SmileXContent.smileFactory.createJsonParser(content);
SmileParser parser = SmileXContent.smileFactory.createParser(content);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);
@ -72,9 +72,9 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
writeFieldName(fieldName);
SmileParser parser;
if (content.hasArray()) {
parser = SmileXContent.smileFactory.createJsonParser(content.array(), content.arrayOffset(), content.length());
parser = SmileXContent.smileFactory.createParser(content.array(), content.arrayOffset(), content.length());
} else {
parser = SmileXContent.smileFactory.createJsonParser(content.streamInput());
parser = SmileXContent.smileFactory.createParser(content.streamInput());
}
try {
parser.nextToken();
@ -87,7 +87,7 @@ public class SmileXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
writeFieldName(fieldName);
SmileParser parser = SmileXContent.smileFactory.createJsonParser(content, offset, length);
SmileParser parser = SmileXContent.smileFactory.createParser(content, offset, length);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);

View File

@ -60,32 +60,32 @@ public class YamlXContent implements XContent {
@Override
public XContentGenerator createGenerator(OutputStream os) throws IOException {
return new YamlXContentGenerator(yamlFactory.createJsonGenerator(os, JsonEncoding.UTF8));
return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8));
}
@Override
public XContentGenerator createGenerator(Writer writer) throws IOException {
return new YamlXContentGenerator(yamlFactory.createJsonGenerator(writer));
return new YamlXContentGenerator(yamlFactory.createGenerator(writer));
}
@Override
public XContentParser createParser(String content) throws IOException {
return new YamlXContentParser(yamlFactory.createJsonParser(new FastStringReader(content)));
return new YamlXContentParser(yamlFactory.createParser(new FastStringReader(content)));
}
@Override
public XContentParser createParser(InputStream is) throws IOException {
return new YamlXContentParser(yamlFactory.createJsonParser(is));
return new YamlXContentParser(yamlFactory.createParser(is));
}
@Override
public XContentParser createParser(byte[] data) throws IOException {
return new YamlXContentParser(yamlFactory.createJsonParser(data));
return new YamlXContentParser(yamlFactory.createParser(data));
}
@Override
public XContentParser createParser(byte[] data, int offset, int length) throws IOException {
return new YamlXContentParser(yamlFactory.createJsonParser(data, offset, length));
return new YamlXContentParser(yamlFactory.createParser(data, offset, length));
}
@Override
@ -98,6 +98,6 @@ public class YamlXContent implements XContent {
@Override
public XContentParser createParser(Reader reader) throws IOException {
return new YamlXContentParser(yamlFactory.createJsonParser(reader));
return new YamlXContentParser(yamlFactory.createParser(reader));
}
}

View File

@ -46,7 +46,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, InputStream content, OutputStream bos) throws IOException {
writeFieldName(fieldName);
YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content);
YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);
@ -58,7 +58,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, byte[] content, OutputStream bos) throws IOException {
writeFieldName(fieldName);
YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content);
YAMLParser parser = YamlXContent.yamlFactory.createParser(content);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);
@ -72,9 +72,9 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
writeFieldName(fieldName);
YAMLParser parser;
if (content.hasArray()) {
parser = YamlXContent.yamlFactory.createJsonParser(content.array(), content.arrayOffset(), content.length());
parser = YamlXContent.yamlFactory.createParser(content.array(), content.arrayOffset(), content.length());
} else {
parser = YamlXContent.yamlFactory.createJsonParser(content.streamInput());
parser = YamlXContent.yamlFactory.createParser(content.streamInput());
}
try {
parser.nextToken();
@ -87,7 +87,7 @@ public class YamlXContentGenerator extends JsonXContentGenerator {
@Override
public void writeRawField(String fieldName, byte[] content, int offset, int length, OutputStream bos) throws IOException {
writeFieldName(fieldName);
YAMLParser parser = YamlXContent.yamlFactory.createJsonParser(content, offset, length);
YAMLParser parser = YamlXContent.yamlFactory.createParser(content, offset, length);
try {
parser.nextToken();
generator.copyCurrentStructure(parser);

View File

@ -19,14 +19,12 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
@ -40,6 +38,7 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC
/**
*
*/
@SuppressWarnings("deprecation")
public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
private final int minGram;

View File

@ -46,6 +46,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
}
@SuppressWarnings("deprecation")
@Override
public TokenStream create(TokenStream tokenStream) {
final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // we supported it since 4.3

View File

@ -19,10 +19,9 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
@ -51,11 +50,11 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
static {
ImmutableMap.Builder<String, CharMatcher> builder = ImmutableMap.builder();
builder.put("letter", CharMatcher.Basic.LETTER);
builder.put("digit", CharMatcher.Basic.DIGIT);
builder.put("whitespace", CharMatcher.Basic.WHITESPACE);
builder.put("letter", CharMatcher.Basic.LETTER);
builder.put("digit", CharMatcher.Basic.DIGIT);
builder.put("whitespace", CharMatcher.Basic.WHITESPACE);
builder.put("punctuation", CharMatcher.Basic.PUNCTUATION);
builder.put("symbol", CharMatcher.Basic.SYMBOL);
builder.put("symbol", CharMatcher.Basic.SYMBOL);
// Populate with unicode categories from java.lang.Character
for (Field field : Character.class.getFields()) {
if (!field.getName().startsWith("DIRECTIONALITY")
@ -97,6 +96,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
}
@SuppressWarnings("deprecation")
@Override
public Tokenizer create(Reader reader) {
final Version version = this.version == Version.LUCENE_43 ? Version.LUCENE_44 : this.version; // we supported it since 4.3

View File

@ -36,6 +36,7 @@ import java.util.Set;
/**
*
*/
@SuppressWarnings("deprecation")
public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet stopWords;
@ -49,7 +50,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
super(index, indexSettings, name, settings);
this.ignoreCase = settings.getAsBoolean("ignore_case", false);
this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, version, ignoreCase);
// LUCENE 4 UPGRADE: LUCENE_29 constant is no longer defined
this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", version.onOrAfter(Version.LUCENE_30));
}

View File

@ -22,10 +22,8 @@ package org.elasticsearch.test.unit.common.lucene.all;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
@ -83,7 +81,7 @@ public class SimpleAllTests {
allEntries.addText("field1", "something", 1.0f);
allEntries.addText("field2", "else", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
@ -93,11 +91,11 @@ public class SimpleAllTests {
allEntries.addText("field1", "else", 1.0f);
allEntries.addText("field2", "something", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
@ -124,7 +122,7 @@ public class SimpleAllTests {
allEntries.addText("field1", "something", 1.0f);
allEntries.addText("field2", "else", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
@ -134,11 +132,11 @@ public class SimpleAllTests {
allEntries.addText("field1", "else", 2.0f);
allEntries.addText("field2", "something", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
// this one is boosted. so the second doc is more relevant
@ -166,7 +164,7 @@ public class SimpleAllTests {
allEntries.addText("field1", "something moo", 1.0f);
allEntries.addText("field2", "else koo", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
@ -176,11 +174,11 @@ public class SimpleAllTests {
allEntries.addText("field1", "else koo", 1.0f);
allEntries.addText("field2", "something moo", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
@ -217,7 +215,7 @@ public class SimpleAllTests {
allEntries.addText("field1", "something moo", 1.0f);
allEntries.addText("field2", "else koo", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
@ -227,11 +225,11 @@ public class SimpleAllTests {
allEntries.addText("field1", "else koo", 2.0f);
allEntries.addText("field2", "something moo", 1.0f);
allEntries.reset();
doc.add(new Field("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);

View File

@ -22,6 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@ -56,7 +57,7 @@ public class MatchAllDocsFilterTests {
document.add(new TextField("text", "lucene release", Field.Store.YES));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
XConstantScoreQuery query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);

View File

@ -22,6 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@ -57,7 +58,7 @@ public class MoreLikeThisQueryTests {
document.add(new TextField("text", "lucene release", Field.Store.YES));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);

View File

@ -22,10 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.elasticsearch.common.lucene.Lucene;
@ -43,7 +40,7 @@ public class MultiPhrasePrefixQueryTests {
Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
writer.addDocument(doc);
IndexReader reader = IndexReader.open(writer, true);
IndexReader reader = DirectoryReader.open(writer, true);
IndexSearcher searcher = new IndexSearcher(reader);
MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();

View File

@ -45,7 +45,7 @@ public class JacksonLocationTests {
// }
// }
BytesStreamOutput os = new BytesStreamOutput();
JsonGenerator gen = new JsonFactory().createJsonGenerator(os);
JsonGenerator gen = new JsonFactory().createGenerator(os);
gen.writeStartObject();
gen.writeStringField("index", "test");
@ -60,7 +60,7 @@ public class JacksonLocationTests {
gen.close();
byte[] data = os.bytes().toBytes();
JsonParser parser = new JsonFactory().createJsonParser(data);
JsonParser parser = new JsonFactory().createParser(data);
assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "index"

View File

@ -22,10 +22,7 @@ package org.elasticsearch.test.unit.deps.lucene;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
import org.apache.lucene.search.vectorhighlight.XFastVectorHighlighter;
@ -52,7 +49,7 @@ public class VectorHighlighterTests {
document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@ -75,7 +72,7 @@ public class VectorHighlighterTests {
document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@ -113,7 +110,7 @@ public class VectorHighlighterTests {
document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
@ -135,7 +132,7 @@ public class VectorHighlighterTests {
document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
indexWriter.addDocument(document);
IndexReader reader = IndexReader.open(indexWriter, true);
IndexReader reader = DirectoryReader.open(indexWriter, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

View File

@ -58,7 +58,7 @@ public class FilterCacheTests {
private void verifyCache(FilterCache filterCache) throws Exception {
Directory dir = new RAMDirectory();
IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
DirectoryReader reader = IndexReader.open(indexWriter, true);
DirectoryReader reader = DirectoryReader.open(indexWriter, true);
for (int i = 0; i < 100; i++) {
Document document = new Document();

View File

@ -25,7 +25,6 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
@ -44,8 +43,6 @@ import static org.hamcrest.Matchers.equalTo;
/**
* A set of tests for {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy}.
*
*
*/
public class SnapshotDeletionPolicyTests {
@ -60,7 +57,7 @@ public class SnapshotDeletionPolicyTests {
dir = new RAMDirectory();
deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
// LUCENE 4 UPGRADE: Not sure about version.
indexWriter = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_31, Lucene.STANDARD_ANALYZER)
indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER)
.setIndexDeletionPolicy(deletionPolicy)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE));
}
@ -70,7 +67,7 @@ public class SnapshotDeletionPolicyTests {
indexWriter.close();
dir.close();
}
private Document testDocument() {
Document document = new Document();
document.add(new TextField("test", "1", Field.Store.YES));

View File

@ -1,5 +1,6 @@
package org.elasticsearch.test.unit.index.mapper.lucene;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@ -43,7 +44,7 @@ public class DoubleIndexingDocTest {
writer.addDocument(doc.rootDoc(), doc.analyzer());
writer.addDocument(doc.rootDoc(), doc.analyzer());
IndexReader reader = IndexReader.open(writer, true);
IndexReader reader = DirectoryReader.open(writer, true);
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(mapper.mappers().smartName("field1").mapper().termQuery("value1", null), 10);