Use FieldType and not deprecated Field construction

Shay Banon 2012-12-28 14:27:09 -08:00
parent 64a01c28c3
commit e02015c641
9 changed files with 43 additions and 30 deletions
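
The pattern applied across all nine files: Lucene 4.0 deprecated the Field(String, String, Field.Store, Field.Index) constructors in favor of an explicit FieldType argument, plus preconfigured sugar classes (StringField, TextField, StoredField). A minimal sketch of the old and new construction styles, assuming the Lucene 4.0 API; the field name here is hypothetical and not taken from this commit:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.StringField;

    class FieldTypeMigrationSketch {
        static Document newDoc() {
            Document doc = new Document();

            // Old style (deprecated in Lucene 4): per-field index/store flags
            // doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));

            // New style: describe the field once in a FieldType, then reuse it
            FieldType idType = new FieldType();
            idType.setIndexed(true);    // indexed ...
            idType.setTokenized(false); // ... as a single token (NOT_ANALYZED)
            idType.setStored(true);     // Store.YES
            idType.freeze();            // lock the configuration
            doc.add(new Field("id", "1", idType));

            // Equivalent shorthand via a preconfigured type
            doc.add(new StringField("id", "1", Field.Store.YES));
            return doc;
        }
    }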

View File

@@ -49,6 +49,7 @@ public class UidFieldMapper extends AbstractFieldMapper<Uid> implements Internal
         public static final String NAME = UidFieldMapper.NAME;
         public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);
+        public static final FieldType NESTED_FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);

         static {
             FIELD_TYPE.setIndexed(true);
@@ -57,6 +58,14 @@ public class UidFieldMapper extends AbstractFieldMapper<Uid> implements Internal
             FIELD_TYPE.setOmitNorms(true);
             FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); // we store payload (otherwise, we really need just docs)
             FIELD_TYPE.freeze();
+
+            NESTED_FIELD_TYPE.setIndexed(true);
+            NESTED_FIELD_TYPE.setTokenized(false);
+            NESTED_FIELD_TYPE.setStored(false);
+            NESTED_FIELD_TYPE.setOmitNorms(true);
+            // we can set this to another index option when we move away from storing payload..
+            //NESTED_FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
+            NESTED_FIELD_TYPE.freeze();
         }
     }
@@ -148,7 +157,7 @@ public class UidFieldMapper extends AbstractFieldMapper<Uid> implements Internal
             // we need to go over the docs and add it...
             for (int i = 1; i < context.docs().size(); i++) {
                 // we don't need to add it as a full uid field in nested docs, since we don't need versioning
-                context.docs().get(i).add(new Field(UidFieldMapper.NAME, uidField.uid(), Field.Store.NO, Field.Index.NOT_ANALYZED));
+                context.docs().get(i).add(new Field(UidFieldMapper.NAME, uidField.uid(), Defaults.NESTED_FIELD_TYPE));
             }
         }
     }
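
A note on the freeze() calls above: once frozen, a FieldType rejects any further mutation, which is what makes a shared static constant like NESTED_FIELD_TYPE safe to hand to every Field instance. A minimal sketch of that behavior, assuming Lucene 4.0 (hypothetical class name, not part of this commit):

    import org.apache.lucene.document.FieldType;

    class FreezeSketch {
        static final FieldType TYPE = new FieldType();
        static {
            TYPE.setIndexed(true);
            TYPE.setTokenized(false);
            TYPE.freeze(); // configuration is now immutable
        }

        public static void main(String[] args) {
            TYPE.setStored(true); // throws IllegalStateException: the type is frozen
        }
    }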

View File

@@ -420,22 +420,22 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll {
         if (nested.isNested()) {
             Document nestedDoc = new Document();
             // pre add the uid field if possible (id was already provided)
-            Field uidField = (Field) context.doc().getField(UidFieldMapper.NAME);
+            IndexableField uidField = context.doc().getField(UidFieldMapper.NAME);
             if (uidField != null) {
                 // we don't need to add it as a full uid field in nested docs, since we don't need versioning
                 // we also rely on this for UidField#loadVersion
                 // this is a deeply nested field
                 if (uidField.stringValue() != null) {
-                    nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), Field.Store.NO, Field.Index.NOT_ANALYZED));
+                    nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
                 } else {
-                    nestedDoc.add(new Field(UidFieldMapper.NAME, ((UidField) uidField).uid(), Field.Store.NO, Field.Index.NOT_ANALYZED));
+                    nestedDoc.add(new Field(UidFieldMapper.NAME, ((UidField) uidField).uid(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
                 }
             }
             // the type of the nested doc starts with __, so we can identify that its a nested one in filters
             // note, we don't prefix it with the type of the doc since it allows us to execute a nested query
             // across types (for example, with similar nested objects)
-            nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePathAsString, Field.Store.NO, Field.Index.NOT_ANALYZED));
+            nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePathAsString, TypeFieldMapper.Defaults.FIELD_TYPE));
             restoreDoc = context.switchDoc(nestedDoc);
             context.addDoc(nestedDoc);
         }
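
The dropped cast above works because Document#getField in Lucene 4 returns the IndexableField interface, which already exposes stringValue() and friends; the concrete Field class is only needed when constructing fields. A minimal sketch, with a hypothetical helper method:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexableField;

    class UidReadSketch {
        // read a field's value through the interface, no cast to Field needed
        static String readValue(Document doc, String name) {
            IndexableField field = doc.getField(name);
            return field == null ? null : field.stringValue();
        }
    }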

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch.search.highlight.vectorhighlight;

 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
@@ -57,7 +58,7 @@ public class SourceScoreOrderFragmentsBuilder extends XScoreOrderFragmentsBuilde
         List<Object> values = lookup.source().extractRawValues(mapper.names().sourcePath());
         Field[] fields = new Field[values.size()];
         for (int i = 0; i < values.size(); i++) {
-            fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), Field.Store.NO, Field.Index.ANALYZED);
+            fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
         }
         return fields;
     }

View File

@@ -20,6 +20,7 @@
 package org.elasticsearch.search.highlight.vectorhighlight;

 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
@@ -62,7 +63,7 @@ public class SourceSimpleFragmentsBuilder extends XSimpleFragmentsBuilder {
         }
         Field[] fields = new Field[values.size()];
         for (int i = 0; i < values.size(); i++) {
-            fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), Field.Store.NO, Field.Index.ANALYZED);
+            fields[i] = new Field(mapper.names().indexName(), values.get(i).toString(), TextField.TYPE_NOT_STORED);
         }
         return fields;
     }

View File

@@ -20,9 +20,7 @@
 package org.elasticsearch.test.unit.common.compress;

 import jsr166y.ThreadLocalRandom;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.Bits;
@@ -297,9 +295,9 @@ public class CompressIndexInputOutputTests {
     private Document createDoc(int id, int size) {
         Document doc = new Document();
-        doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        doc.add(new Field("size", Integer.toString(size), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        doc.add(new Field("skip", RandomStringGenerator.random(50), Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("id", Integer.toString(id), StringField.TYPE_STORED));
+        doc.add(new Field("size", Integer.toString(size), StringField.TYPE_STORED));
+        doc.add(new Field("skip", RandomStringGenerator.random(50), StoredField.TYPE));
         StringBuilder sb = new StringBuilder();
         int count = 0;
         while (true) {
@@ -310,9 +308,9 @@ public class CompressIndexInputOutputTests {
                 break;
             }
         }
-        doc.add(new Field("count", Integer.toString(count), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        doc.add(new Field("field", sb.toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        doc.add(new Field("skip", RandomStringGenerator.random(50), Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("count", Integer.toString(count), StringField.TYPE_STORED));
+        doc.add(new Field("field", sb.toString(), StringField.TYPE_STORED));
+        doc.add(new Field("skip", RandomStringGenerator.random(50), StoredField.TYPE));
         return doc;
     }
 }
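
The test replacements above map each old flag pair onto a preconfigured FieldType constant. Roughly, as a sketch against the Lucene 4.0 API (the field names echo the test above, except the hypothetical "body"):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.document.TextField;

    class SugarTypeSketch {
        static Document newDoc() {
            Document doc = new Document();
            // Store.YES + Index.NOT_ANALYZED  ->  StringField.TYPE_STORED
            doc.add(new Field("id", "1", StringField.TYPE_STORED));
            // Store.YES + Index.NO            ->  StoredField.TYPE (stored only, not indexed)
            doc.add(new Field("skip", "opaque payload", StoredField.TYPE));
            // Store.NO  + Index.ANALYZED      ->  TextField.TYPE_NOT_STORED
            doc.add(new Field("body", "full text here", TextField.TYPE_NOT_STORED));
            return doc;
        }
    }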

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.test.unit.common.lucene.all;

 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -78,7 +79,7 @@ public class SimpleAllTests {
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
         Document doc = new Document();
-        doc.add(new Field("_id", "1", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "1", StoredField.TYPE));
         AllEntries allEntries = new AllEntries();
         allEntries.addText("field1", "something", 1.0f);
         allEntries.addText("field2", "else", 1.0f);
@@ -88,7 +89,7 @@ public class SimpleAllTests {
         indexWriter.addDocument(doc);
         doc = new Document();
-        doc.add(new Field("_id", "2", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "2", StoredField.TYPE));
         allEntries = new AllEntries();
         allEntries.addText("field1", "else", 1.0f);
         allEntries.addText("field2", "something", 1.0f);
@@ -119,7 +120,7 @@ public class SimpleAllTests {
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
         Document doc = new Document();
-        doc.add(new Field("_id", "1", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "1", StoredField.TYPE));
         AllEntries allEntries = new AllEntries();
         allEntries.addText("field1", "something", 1.0f);
         allEntries.addText("field2", "else", 1.0f);
@@ -129,7 +130,7 @@ public class SimpleAllTests {
         indexWriter.addDocument(doc);
         doc = new Document();
-        doc.add(new Field("_id", "2", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "2", StoredField.TYPE));
         allEntries = new AllEntries();
         allEntries.addText("field1", "else", 2.0f);
         allEntries.addText("field2", "something", 1.0f);
@@ -161,7 +162,7 @@ public class SimpleAllTests {
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
         Document doc = new Document();
-        doc.add(new Field("_id", "1", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "1", StoredField.TYPE));
         AllEntries allEntries = new AllEntries();
         allEntries.addText("field1", "something moo", 1.0f);
         allEntries.addText("field2", "else koo", 1.0f);
@@ -171,7 +172,7 @@ public class SimpleAllTests {
         indexWriter.addDocument(doc);
         doc = new Document();
-        doc.add(new Field("_id", "2", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "2", StoredField.TYPE));
         allEntries = new AllEntries();
         allEntries.addText("field1", "else koo", 1.0f);
         allEntries.addText("field2", "something moo", 1.0f);
@@ -212,7 +213,7 @@ public class SimpleAllTests {
         IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
         Document doc = new Document();
-        doc.add(new Field("_id", "1", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "1", StoredField.TYPE));
         AllEntries allEntries = new AllEntries();
         allEntries.addText("field1", "something moo", 1.0f);
         allEntries.addText("field2", "else koo", 1.0f);
@@ -222,7 +223,7 @@ public class SimpleAllTests {
         indexWriter.addDocument(doc);
         doc = new Document();
-        doc.add(new Field("_id", "2", Field.Store.YES, Field.Index.NO));
+        doc.add(new Field("_id", "2", StoredField.TYPE));
         allEntries = new AllEntries();
         allEntries.addText("field1", "else koo", 2.0f);
         allEntries.addText("field2", "something moo", 1.0f);

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.test.unit.common.lucene.search;

 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -41,7 +42,7 @@ public class MultiPhrasePrefixQueryTests {
     public void simpleTests() throws Exception {
         IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
         Document doc = new Document();
-        doc.add(new Field("field", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
+        doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
         writer.addDocument(doc);
         IndexReader reader = IndexReader.open(writer, true);
         IndexSearcher searcher = new IndexSearcher(reader);

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.test.unit.common.lucene.search;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
@@ -48,8 +49,8 @@ public class TermsFilterTests {
         for (int i = 0; i < 100; i++) {
             Document doc = new Document();
             int term = i * 10; //terms are units of 10;
-            doc.add(new Field(fieldName, "" + term, Field.Store.NO, Field.Index.NOT_ANALYZED));
-            doc.add(new Field("all", "xxx", Field.Store.NO, Field.Index.NOT_ANALYZED));
+            doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+            doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
             w.addDocument(doc);
             if ((i % 40) == 0) {
                 w.commit();
@@ -82,8 +83,8 @@ public class TermsFilterTests {
         for (int i = 0; i < 100; i++) {
             Document doc = new Document();
             int term = i * 10; //terms are units of 10;
-            doc.add(new Field(fieldName, "" + term, Field.Store.NO, Field.Index.NOT_ANALYZED));
-            doc.add(new Field("all", "xxx", Field.Store.NO, Field.Index.NOT_ANALYZED));
+            doc.add(new Field(fieldName, "" + term, StringField.TYPE_NOT_STORED));
+            doc.add(new Field("all", "xxx", StringField.TYPE_NOT_STORED));
             w.addDocument(doc);
             if ((i % 40) == 0) {
                 w.commit();

View File

@@ -5,6 +5,7 @@ import com.spatial4j.core.shape.Shape;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -64,7 +65,7 @@ public class TermQueryPrefixTreeStrategyTests {
     private Document newDocument(String id, Shape shape) {
         Document document = new Document();
-        document.add(new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED));
+        document.add(new Field("id", id, StringField.TYPE_STORED));
         document.add(STRATEGY.createField(shape));
         return document;
     }