From 8d28270460fc84ea2bf34dc47bd8fe3da2b35bc0 Mon Sep 17 00:00:00 2001
From: Christopher John Male
Date: Wed, 28 Sep 2011 08:07:16 +0000
Subject: [PATCH] LUCENE-3470: Changed Field constructor signatures order to value, fieldtype

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1176773 13f79535-47bb-0310-9956-ffa450edef68
---
 lucene/MIGRATE.txt                             | 10 ++--
 .../org/apache/lucene/demo/IndexFiles.java     |  2 +-
 .../demo/xmlparser/FormBasedXmlQueryDemo.java  |  8 +--
 .../vectorhighlight/BaseFragmentsBuilder.java  |  2 +-
 .../highlight/HighlighterPhraseTest.java       | 10 ++--
 .../search/highlight/HighlighterTest.java      |  4 +-
 .../search/highlight/TokenSourcesTest.java     |  8 +--
 .../vectorhighlight/AbstractTestCase.java      |  4 +-
 .../SimpleFragmentsBuilderTest.java            |  2 +-
 .../store/instantiated/TestIndicesEquals.java  | 16 ++---
 .../lucene/document/FieldSelectorVisitor.java  |  2 +-
 .../lucene/index/TestIndexSplitter.java        |  4 +-
 .../apache/lucene/document/BinaryField.java    |  6 +-
 .../document/DocumentStoredFieldVisitor.java   |  4 +-
 .../org/apache/lucene/document/Field.java      | 16 ++---
 .../apache/lucene/document/StringField.java    |  2 +-
 .../org/apache/lucene/document/TextField.java  |  6 +-
 .../PersistentSnapshotDeletionPolicy.java      |  6 +-
 .../lucene/analysis/CollationTestBase.java     | 12 ++--
 .../org/apache/lucene/index/DocHelper.java     | 34 +++++------
 .../org/apache/lucene/util/LineFileDocs.java   |  8 +--
 .../apache/lucene/util/LuceneTestCase.java     |  4 +-
 .../org/apache/lucene/util/_TestUtil.java      |  5 +-
 .../lucene/document/TestBinaryDocument.java    |  2 +-
 .../apache/lucene/document/TestDocument.java   | 28 ++++-----
 .../apache/lucene/index/Test2BPostings.java    |  2 +-
 .../org/apache/lucene/index/Test2BTerms.java   |  2 +-
 .../apache/lucene/index/TestAddIndexes.java    |  4 +-
 .../apache/lucene/index/TestAtomicUpdate.java  |  2 +-
 .../index/TestBackwardsCompatibility.java      | 14 ++---
 .../apache/lucene/index/TestBinaryTerms.java   |  2 +-
 .../index/TestConsistentFieldNumbers.java      | 58 +++++++++----------
 .../apache/lucene/index/TestFieldsReader.java  |  4 +-
 .../lucene/index/TestGlobalFieldNumbers.java   | 36 ++++++------
 .../apache/lucene/index/TestIndexReader.java   | 38 ++++++------
 .../lucene/index/TestIndexReaderReopen.java    | 10 ++--
 .../apache/lucene/index/TestIndexWriter.java   | 16 ++---
 .../index/TestIndexWriterExceptions.java       |  4 +-
 .../apache/lucene/index/TestOmitNorms.java     | 10 ++--
 .../org/apache/lucene/index/TestPayloads.java  |  4 +-
 .../index/TestSameTokenSamePosition.java       |  4 +-
 .../apache/lucene/index/TestSegmentInfo.java   |  2 +-
 .../lucene/index/TestTermVectorsReader.java    |  2 +-
 .../lucene/index/TestTermVectorsWriter.java    |  2 +-
 .../index/values/TestDocValuesIndexing.java    |  2 +-
 .../lucene/search/TestSloppyPhraseQuery.java   |  4 +-
 .../org/apache/lucene/search/TestSort.java     |  6 +-
 .../apache/lucene/search/TestTermVectors.java  |  4 +-
 .../lucene/search/payloads/PayloadHelper.java  |  6 +-
 .../analysis/core/TestKeywordAnalyzer.java     |  8 +--
 .../query/QueryAutoStopWordAnalyzerTest.java   |  4 +-
 .../shingle/ShingleAnalyzerWrapperTest.java    |  6 +-
 .../sinks/TestTeeSinkTokenFilter.java          |  4 +-
 .../benchmark/byTask/feeds/DocMaker.java       | 14 ++---
 .../association/AssociationIndexer.java        |  2 +-
 .../facet/example/multiCL/MultiCLIndexer.java  |  2 +-
 .../facet/example/simple/SimpleIndexer.java    |  2 +-
 .../facet/index/CategoryDocumentBuilder.java   |  2 +-
 .../taxonomy/lucene/LuceneTaxonomyWriter.java  |  4 +-
 .../apache/lucene/facet/FacetTestBase.java     |  2 +-
 .../apache/lucene/facet/FacetTestUtils.java    |  2 +-
 .../TestTopKInEachNodeResultHandler.java       |  2 +-
 .../facet/util/TestScoredDocIDsUtils.java      |  2 +-
 .../grouping/TermAllGroupsCollectorTest.java   | 40 ++++++-------
 .../lucene/search/grouping/TestGrouping.java   | 40 ++++++-------
 .../lucene/search/spell/SpellChecker.java      |  4 +-
 .../org/apache/solr/schema/FieldType.java      |  2 +-
 .../solr/core/TestArbitraryIndexDir.java       |  4 +-
 .../test/org/apache/solr/search/TestSort.java  |  4 +-
 .../spelling/IndexBasedSpellCheckerTest.java   |  2 +-
 70 files changed, 295 insertions(+), 300 deletions(-)

diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index be1dc184ea0..20b8827ad83 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -437,7 +437,7 @@
 If your usage fits one of those common cases you can simply
 instantiate the above class.  To use the TYPE_STORED variant, do this instead:
 
-  Field f = new Field("field", StringField.TYPE_STORED, "value");
+  Field f = new Field("field", "value", StringField.TYPE_STORED);
 
 Alternatively, if an existing type is close to what you want but you need to
 make a few changes, you can copy that type and make changes:
@@ -472,7 +472,7 @@
 If instead the value was stored:
 
 you can now do this:
 
-  new Field("field", StringField.TYPE_STORED, "value")
+  new Field("field", "value", StringField.TYPE_STORED)
 
 If you didn't omit norms:
@@ -482,7 +482,7 @@
 you can now do this:
 
   FieldType ft = new FieldType(StringField.TYPE_STORED);
   ft.setOmitNorms(false);
-  new Field("field", ft, "value")
+  new Field("field", "value", ft)
 
 If you did this before (value can be String or Reader):
@@ -498,7 +498,7 @@
 If instead the value was stored:
 
 you can now do this:
 
-  new Field("field", TextField.TYPE_STORED, value)
+  new Field("field", value, TextField.TYPE_STORED)
 
 If in addition you omit norms:
@@ -508,7 +508,7 @@
 you can now do this:
 
   FieldType ft = new FieldType(TextField.TYPE_STORED);
   ft.setOmitNorms(true);
-  new Field("field", ft, value)
+  new Field("field", value, ft)
 
 If you did this before (bytes is a byte[]):
diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
index 5c578dd637c..2b7dff05da7 100644
--- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
+++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
@@ -174,7 +174,7 @@ public class IndexFiles {
         // field that is indexed (i.e. searchable), but don't tokenize
         // the field into separate words and don't index term frequency
         // or positional information:
-        Field pathField = new Field("path", StringField.TYPE_STORED, file.getPath());
+        Field pathField = new Field("path", file.getPath(), StringField.TYPE_STORED);
         doc.add(pathField);
 
         // Add the last modified date of the file a field named "modified".
diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java index 5f1bb702bca..734f8df7197 100644 --- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java +++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/FormBasedXmlQueryDemo.java @@ -134,10 +134,10 @@ public class FormBasedXmlQueryDemo extends HttpServlet { //parse row and create a document StringTokenizer st = new StringTokenizer(line, "\t"); Document doc = new Document(); - doc.add(new Field("location", textNoNorms, st.nextToken())); - doc.add(new Field("salary", textNoNorms, st.nextToken())); - doc.add(new Field("type", textNoNorms, st.nextToken())); - doc.add(new Field("description", textNoNorms, st.nextToken())); + doc.add(new Field("location", st.nextToken(), textNoNorms)); + doc.add(new Field("salary", st.nextToken(), textNoNorms)); + doc.add(new Field("type", st.nextToken(), textNoNorms)); + doc.add(new Field("description", st.nextToken(), textNoNorms)); writer.addDocument(doc); } line = br.readLine(); diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java index bbda0e28d6d..f56c9067e61 100644 --- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java +++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java @@ -133,7 +133,7 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder { ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); - fields.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); + fields.add(new Field(fieldInfo.name, new String(b, "UTF-8"), ft)); } else { in.seek(in.getFilePointer() + numUTF8Bytes); } diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java index 35ef911c679..990d3c4c401 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java @@ -64,7 +64,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); - document.add(new Field(FIELD, customType, new TokenStreamConcurrent())); + document.add(new Field(FIELD, new TokenStreamConcurrent(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -112,7 +112,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); - document.add(new Field(FIELD, customType, new TokenStreamConcurrent())); + document.add(new Field(FIELD, new TokenStreamConcurrent(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -187,7 +187,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); 
customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); - document.add(new Field(FIELD, customType, new TokenStreamSparse())); + document.add(new Field(FIELD, new TokenStreamSparse(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -233,7 +233,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectors(true); - document.add(new Field(FIELD, customType, TEXT)); + document.add(new Field(FIELD, TEXT, customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -277,7 +277,7 @@ public class HighlighterPhraseTest extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); - document.add(new Field(FIELD, customType, new TokenStreamSparse())); + document.add(new Field(FIELD, new TokenStreamSparse(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java index 573e081e84f..95a230b880a 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java @@ -1625,7 +1625,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte private Document doc( String f, String v ){ Document doc = new Document(); - doc.add( new Field( f, TextField.TYPE_STORED, v)); + doc.add( new Field( f, v, TextField.TYPE_STORED)); return doc; } @@ -1776,7 +1776,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte private void addDoc(IndexWriter writer, String text) throws IOException { Document d = new Document(); - Field f = new Field(FIELD_NAME, TextField.TYPE_STORED, text); + Field f = new Field(FIELD_NAME, text, TextField.TYPE_STORED); d.add(f); writer.addDocument(d); diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java index f482a674c44..9dbecbd9c6c 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java @@ -109,7 +109,7 @@ public class TokenSourcesTest extends LuceneTestCase { FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); - document.add(new Field(FIELD, customType, new TokenStreamOverlap())); + document.add(new Field(FIELD, new TokenStreamOverlap(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -158,7 +158,7 @@ public class TokenSourcesTest extends LuceneTestCase { customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); - document.add(new Field(FIELD, customType, new TokenStreamOverlap())); + document.add(new Field(FIELD, new TokenStreamOverlap(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -206,7 +206,7 @@ public class TokenSourcesTest extends 
LuceneTestCase { FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); - document.add(new Field(FIELD, customType, new TokenStreamOverlap())); + document.add(new Field(FIELD, new TokenStreamOverlap(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); @@ -255,7 +255,7 @@ public class TokenSourcesTest extends LuceneTestCase { FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); - document.add(new Field(FIELD, customType, new TokenStreamOverlap())); + document.add(new Field(FIELD, new TokenStreamOverlap(), customType)); indexWriter.addDocument(document); } finally { indexWriter.close(); diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java index ec4bbf05897..166ec88a50e 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java @@ -359,7 +359,7 @@ public abstract class AbstractTestCase extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); for( String value: values ) { - doc.add( new Field( F, customType, value ) ); + doc.add( new Field( F, value, customType) ); } writer.addDocument( doc ); writer.close(); @@ -377,7 +377,7 @@ public abstract class AbstractTestCase extends LuceneTestCase { customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); for( String value: values ) { - doc.add( new Field( F, customType, value )); + doc.add( new Field( F, value, customType)); //doc.add( new Field( F, value, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); } writer.addDocument( doc ); diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java index f9236345634..9ee505a3eab 100644 --- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java +++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java @@ -142,7 +142,7 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase { customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); - doc.add( new Field( F, customType, "aaa" ) ); + doc.add( new Field( F, "aaa", customType) ); //doc.add( new Field( F, "aaa", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS ) ); writer.addDocument( doc ); writer.close(); diff --git a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java index 004b8fb92ca..bda6802e3c7 100644 --- a/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java +++ b/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java @@ -211,39 +211,39 @@ public class TestIndicesEquals extends LuceneTestCase { 
customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); //document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - document.add(new Field("a", customType, i + " Do you really want to go and live in that house all winter?")); + document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", customType)); if (i > 0) { //document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - document.add(new Field("b0", customType, i + " All work and no play makes Jack a dull boy")); + document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", customType)); //document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO)); FieldType customType2 = new FieldType(TextField.TYPE_STORED); customType2.setTokenized(false); customType2.setOmitNorms(true); - document.add(new Field("b1", customType2, i + " All work and no play makes Jack a dull boy")); + document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType2)); //document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO)); FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); customType3.setTokenized(false); - document.add(new Field("b1", customType3, i + " All work and no play makes Jack a dull boy")); + document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType3)); //document.add(new Field("b3", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO, Field.TermVector.NO)); FieldType customType4 = new FieldType(TextField.TYPE_STORED); customType4.setIndexed(false); customType4.setTokenized(false); - document.add(new Field("b1", customType4, i + " All work and no play makes Jack a dull boy")); + document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", customType4)); if (i > 1) { //document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - document.add(new Field("c", customType, i + " Redrum redrum")); + document.add(new Field("c", i + " Redrum redrum", customType)); if (i > 2) { //document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - document.add(new Field("d", customType, i + " Hello Danny, come and play with us... forever and ever. and ever.")); + document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. 
and ever.", customType)); if (i > 3) { //Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); //f.setOmitNorms(true); FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); customType5.setOmitNorms(true); - Field f = new Field("e", customType5, i + " Heres Johnny!"); + Field f = new Field("e", i + " Heres Johnny!", customType5); document.add(f); if (i > 4) { final List tokens = new ArrayList(2); diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java b/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java index 97982f9ec8d..61d6a70f6c4 100644 --- a/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java +++ b/lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java @@ -90,7 +90,7 @@ public class FieldSelectorVisitor extends StoredFieldVisitor { ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); - doc.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); + doc.add(new Field(fieldInfo.name, new String(b, "UTF-8"), ft)); return accept != FieldSelectorResult.LOAD; case LAZY_LOAD: case LATENT: diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java index 4ef088afae8..171a383ce7d 100644 --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java @@ -113,10 +113,10 @@ public class TestIndexSplitter extends LuceneTestCase { Directory fsDir = newFSDirectory(indexPath); IndexWriter indexWriter = new IndexWriter(fsDir, iwConfig); Document doc = new Document(); - doc.add(new Field("content", StringField.TYPE_STORED, "doc 1")); + doc.add(new Field("content", "doc 1", StringField.TYPE_STORED)); indexWriter.addDocument(doc); doc = new Document(); - doc.add(new Field("content", StringField.TYPE_STORED, "doc 2")); + doc.add(new Field("content", "doc 2", StringField.TYPE_STORED)); indexWriter.addDocument(doc); indexWriter.close(); fsDir.close(); diff --git a/lucene/src/java/org/apache/lucene/document/BinaryField.java b/lucene/src/java/org/apache/lucene/document/BinaryField.java index d6dff0cc1df..813ba401109 100644 --- a/lucene/src/java/org/apache/lucene/document/BinaryField.java +++ b/lucene/src/java/org/apache/lucene/document/BinaryField.java @@ -31,16 +31,16 @@ public final class BinaryField extends Field { /** Creates a new BinaryField */ public BinaryField(String name, byte[] value) { - super(name, BinaryField.TYPE_STORED, value); + super(name, value, BinaryField.TYPE_STORED); } /** Creates a new BinaryField */ public BinaryField(String name, byte[] value, int offset, int length) { - super(name, BinaryField.TYPE_STORED, value, offset, length); + super(name, value, offset, length, BinaryField.TYPE_STORED); } /** Creates a new BinaryField */ public BinaryField(String name, BytesRef bytes) { - super(name, BinaryField.TYPE_STORED, bytes); + super(name, bytes, BinaryField.TYPE_STORED); } } diff --git a/lucene/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java b/lucene/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java index 51a348244b1..59def2bef5e 100644 --- 
a/lucene/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java +++ b/lucene/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java @@ -82,8 +82,8 @@ public class DocumentStoredFieldVisitor extends StoredFieldVisitor { ft.setOmitNorms(fieldInfo.omitNorms); ft.setIndexOptions(fieldInfo.indexOptions); doc.add(new Field(fieldInfo.name, - ft, - new String(b, "UTF-8"))); + new String(b, "UTF-8"), ft + )); } else { in.seek(in.getFilePointer() + numUTF8Bytes); } diff --git a/lucene/src/java/org/apache/lucene/document/Field.java b/lucene/src/java/org/apache/lucene/document/Field.java index 14e51ab3dbe..9c374736890 100644 --- a/lucene/src/java/org/apache/lucene/document/Field.java +++ b/lucene/src/java/org/apache/lucene/document/Field.java @@ -60,7 +60,7 @@ public class Field implements IndexableField { this.type = type; } - public Field(String name, IndexableFieldType type, Reader reader) { + public Field(String name, Reader reader, IndexableFieldType type) { if (name == null) { throw new NullPointerException("name cannot be null"); } @@ -76,7 +76,7 @@ public class Field implements IndexableField { this.type = type; } - public Field(String name, IndexableFieldType type, TokenStream tokenStream) { + public Field(String name, TokenStream tokenStream, IndexableFieldType type) { if (name == null) { throw new NullPointerException("name cannot be null"); } @@ -93,15 +93,15 @@ public class Field implements IndexableField { this.type = type; } - public Field(String name, IndexableFieldType type, byte[] value) { - this(name, type, value, 0, value.length); + public Field(String name, byte[] value, IndexableFieldType type) { + this(name, value, 0, value.length, type); } - public Field(String name, IndexableFieldType type, byte[] value, int offset, int length) { - this(name, type, new BytesRef(value, offset, length)); + public Field(String name, byte[] value, int offset, int length, IndexableFieldType type) { + this(name, new BytesRef(value, offset, length), type); } - public Field(String name, IndexableFieldType type, BytesRef bytes) { + public Field(String name, BytesRef bytes, IndexableFieldType type) { if (type.indexed() && !type.tokenized()) { throw new IllegalArgumentException("Non-tokenized fields must use String values"); } @@ -111,7 +111,7 @@ public class Field implements IndexableField { this.name = name; } - public Field(String name, IndexableFieldType type, String value) { + public Field(String name, String value, IndexableFieldType type) { if (name == null) { throw new IllegalArgumentException("name cannot be null"); } diff --git a/lucene/src/java/org/apache/lucene/document/StringField.java b/lucene/src/java/org/apache/lucene/document/StringField.java index 41a3f1d8b32..3b66f5105c0 100644 --- a/lucene/src/java/org/apache/lucene/document/StringField.java +++ b/lucene/src/java/org/apache/lucene/document/StringField.java @@ -54,7 +54,7 @@ public final class StringField extends Field { /** Creates a new un-stored StringField */ public StringField(String name, String value) { - super(name, TYPE_UNSTORED, value); + super(name, value, TYPE_UNSTORED); } @Override diff --git a/lucene/src/java/org/apache/lucene/document/TextField.java b/lucene/src/java/org/apache/lucene/document/TextField.java index 19131da7045..8d2bf2bd517 100644 --- a/lucene/src/java/org/apache/lucene/document/TextField.java +++ b/lucene/src/java/org/apache/lucene/document/TextField.java @@ -50,16 +50,16 @@ public final class TextField extends Field { /** Creates a new un-stored TextField */ public 
TextField(String name, Reader reader) { - super(name, TextField.TYPE_UNSTORED, reader); + super(name, reader, TextField.TYPE_UNSTORED); } /** Creates a new un-stored TextField */ public TextField(String name, String value) { - super(name, TextField.TYPE_UNSTORED, value); + super(name, value, TextField.TYPE_UNSTORED); } /** Creates a new un-stored TextField */ public TextField(String name, TokenStream stream) { - super(name, TextField.TYPE_UNSTORED, stream); + super(name, stream, TextField.TYPE_UNSTORED); } } diff --git a/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java b/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java index e693d8ff013..e7d594aa432 100644 --- a/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java +++ b/lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java @@ -188,12 +188,12 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy { Document d = new Document(); FieldType ft = new FieldType(); ft.setStored(true); - d.add(new Field(SNAPSHOTS_ID, ft, "")); + d.add(new Field(SNAPSHOTS_ID, "", ft)); for (Entry e : super.getSnapshots().entrySet()) { - d.add(new Field(e.getKey(), ft, e.getValue())); + d.add(new Field(e.getKey(), e.getValue(), ft)); } if (id != null) { - d.add(new Field(id, ft, segment)); + d.add(new Field(id, segment, ft)); } writer.addDocument(d); writer.commit(); diff --git a/lucene/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java b/lucene/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java index 4b8f8202606..0a88ccfdbe2 100644 --- a/lucene/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java +++ b/lucene/src/test-framework/org/apache/lucene/analysis/CollationTestBase.java @@ -81,8 +81,8 @@ public abstract class CollationTestBase extends LuceneTestCase { IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628")); - doc.add(new Field("body", StringField.TYPE_STORED, "body")); + doc.add(new Field("content", "\u0633\u0627\u0628", TextField.TYPE_STORED)); + doc.add(new Field("body", "body", StringField.TYPE_STORED)); writer.addDocument(doc); writer.close(); IndexSearcher searcher = new IndexSearcher(ramDir, true); @@ -116,7 +116,7 @@ public abstract class CollationTestBase extends LuceneTestCase { // orders the U+0698 character before the U+0633 character, so the single // index Term below should NOT be returned by a TermRangeQuery with a Farsi // Collator (or an Arabic one for the case when Farsi is not supported). 
- doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628")); + doc.add(new Field("content", "\u0633\u0627\u0628", TextField.TYPE_STORED)); writer.addDocument(doc); writer.close(); IndexSearcher searcher = new IndexSearcher(ramDir, true); @@ -138,8 +138,8 @@ public abstract class CollationTestBase extends LuceneTestCase { IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628")); - doc.add(new Field("body", StringField.TYPE_STORED, "body")); + doc.add(new Field("content", "\u0633\u0627\u0628", TextField.TYPE_STORED)); + doc.add(new Field("body", "body", StringField.TYPE_STORED)); writer.addDocument(doc); writer.close(); @@ -204,7 +204,7 @@ public abstract class CollationTestBase extends LuceneTestCase { for (int i = 0 ; i < sortData.length ; ++i) { Document doc = new Document(); - doc.add(new Field("tracer", customType, sortData[i][0])); + doc.add(new Field("tracer", sortData[i][0], customType)); doc.add(new TextField("contents", sortData[i][1])); if (sortData[i][2] != null) doc.add(new TextField("US", usAnalyzer.tokenStream("US", new StringReader(sortData[i][2])))); diff --git a/lucene/src/test-framework/org/apache/lucene/index/DocHelper.java b/lucene/src/test-framework/org/apache/lucene/index/DocHelper.java index 1f815d58a54..e0b30f0ce7f 100644 --- a/lucene/src/test-framework/org/apache/lucene/index/DocHelper.java +++ b/lucene/src/test-framework/org/apache/lucene/index/DocHelper.java @@ -46,7 +46,7 @@ class DocHelper { public static Field textField1; static { customType = new FieldType(TextField.TYPE_STORED); - textField1 = new Field(TEXT_FIELD_1_KEY, customType, FIELD_1_TEXT); + textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT, customType); } public static final FieldType customType2; @@ -60,7 +60,7 @@ class DocHelper { customType2.setStoreTermVectors(true); customType2.setStoreTermVectorPositions(true); customType2.setStoreTermVectorOffsets(true); - textField2 = new Field(TEXT_FIELD_2_KEY, customType2, FIELD_2_TEXT); + textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, customType2); } public static final FieldType customType3; @@ -71,14 +71,14 @@ class DocHelper { static { customType3 = new FieldType(TextField.TYPE_STORED); customType3.setOmitNorms(true); - textField3 = new Field(TEXT_FIELD_3_KEY, customType3, FIELD_3_TEXT); + textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, customType3); } public static final String KEYWORD_TEXT = "Keyword"; public static final String KEYWORD_FIELD_KEY = "keyField"; public static Field keyField; static { - keyField = new Field(KEYWORD_FIELD_KEY, StringField.TYPE_STORED, KEYWORD_TEXT); + keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT, StringField.TYPE_STORED); } public static final FieldType customType5; @@ -89,7 +89,7 @@ class DocHelper { customType5 = new FieldType(TextField.TYPE_STORED); customType5.setOmitNorms(true); customType5.setTokenized(false); - noNormsField = new Field(NO_NORMS_KEY, customType5, NO_NORMS_TEXT); + noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT, customType5); } public static final FieldType customType6; @@ -99,7 +99,7 @@ class DocHelper { static { customType6 = new FieldType(TextField.TYPE_STORED); customType6.setIndexOptions(IndexOptions.DOCS_ONLY); - noTFField = new Field(NO_TF_KEY, customType6, NO_TF_TEXT); + noTFField = new Field(NO_TF_KEY, NO_TF_TEXT, customType6); } public static final FieldType customType7; @@ -109,13 
+109,13 @@ class DocHelper { static { customType7 = new FieldType(); customType7.setStored(true); - unIndField = new Field(UNINDEXED_FIELD_KEY, customType7, UNINDEXED_FIELD_TEXT); + unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT, customType7); } public static final String UNSTORED_1_FIELD_TEXT = "unstored field text"; public static final String UNSTORED_FIELD_1_KEY = "unStoredField1"; - public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, TextField.TYPE_UNSTORED, UNSTORED_1_FIELD_TEXT); + public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, TextField.TYPE_UNSTORED); public static final FieldType customType8; public static final String UNSTORED_2_FIELD_TEXT = "unstored field text"; @@ -124,7 +124,7 @@ class DocHelper { static { customType8 = new FieldType(TextField.TYPE_UNSTORED); customType8.setStoreTermVectors(true); - unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, customType8, UNSTORED_2_FIELD_TEXT); + unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT, customType8); } public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary"; @@ -133,7 +133,7 @@ class DocHelper { public static final String LAZY_FIELD_KEY = "lazyField"; public static final String LAZY_FIELD_TEXT = "These are some field bytes"; - public static Field lazyField = new Field(LAZY_FIELD_KEY, customType, LAZY_FIELD_TEXT); + public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, customType); public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField"; public static String LARGE_LAZY_FIELD_TEXT; @@ -142,13 +142,13 @@ class DocHelper { //From Issue 509 public static final String FIELD_UTF1_TEXT = "field one \u4e00text"; public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8"; - public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, customType, FIELD_UTF1_TEXT); + public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, customType); public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text"; //Fields will be lexicographically sorted. So, the order is: field, text, two public static final int [] FIELD_UTF2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8"; - public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, customType2, FIELD_UTF2_TEXT); + public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, customType2); @@ -200,7 +200,7 @@ class DocHelper { lazyFieldBinary = new BinaryField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); fields[fields.length - 2] = lazyFieldBinary; LARGE_LAZY_FIELD_TEXT = buffer.toString(); - largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, customType, LARGE_LAZY_FIELD_TEXT); + largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, customType); fields[fields.length - 1] = largeLazyField; for (int i=0; i(); // Initialize the map with the default fields. 
- fields.put(BODY_FIELD, new Field(BODY_FIELD, bodyFt, "")); - fields.put(TITLE_FIELD, new Field(TITLE_FIELD, ft, "")); - fields.put(DATE_FIELD, new Field(DATE_FIELD, ft, "")); - fields.put(ID_FIELD, new Field(ID_FIELD, StringField.TYPE_STORED, "")); - fields.put(NAME_FIELD, new Field(NAME_FIELD, ft, "")); + fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyFt)); + fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", ft)); + fields.put(DATE_FIELD, new Field(DATE_FIELD, "", ft)); + fields.put(ID_FIELD, new Field(ID_FIELD, "", StringField.TYPE_STORED)); + fields.put(NAME_FIELD, new Field(NAME_FIELD, "", ft)); numericFields.put(DATE_MSEC_FIELD, new NumericField(DATE_MSEC_FIELD)); numericFields.put(TIME_SEC_FIELD, new NumericField(TIME_SEC_FIELD)); @@ -127,12 +127,12 @@ public class DocMaker { */ Field getField(String name, FieldType ft) { if (!reuseFields) { - return new Field(name, ft, ""); + return new Field(name, "", ft); } Field f = fields.get(name); if (f == null) { - f = new Field(name, ft, ""); + f = new Field(name, "", ft); fields.put(name, f); } return f; diff --git a/modules/facet/src/examples/org/apache/lucene/facet/example/association/AssociationIndexer.java b/modules/facet/src/examples/org/apache/lucene/facet/example/association/AssociationIndexer.java index 6d8eb9308e9..13828370dc3 100644 --- a/modules/facet/src/examples/org/apache/lucene/facet/example/association/AssociationIndexer.java +++ b/modules/facet/src/examples/org/apache/lucene/facet/example/association/AssociationIndexer.java @@ -93,7 +93,7 @@ public class AssociationIndexer { // create a plain Lucene document and add some regular Lucene fields // to it Document doc = new Document(); - doc.add(new Field(SimpleUtils.TITLE, TextField.TYPE_STORED, SimpleUtils.docTitles[docNum])); + doc.add(new Field(SimpleUtils.TITLE, SimpleUtils.docTitles[docNum], TextField.TYPE_STORED)); doc.add(new TextField(SimpleUtils.TEXT, SimpleUtils.docTexts[docNum])); // invoke the category document builder for adding categories to the diff --git a/modules/facet/src/examples/org/apache/lucene/facet/example/multiCL/MultiCLIndexer.java b/modules/facet/src/examples/org/apache/lucene/facet/example/multiCL/MultiCLIndexer.java index be3114ba389..21e8475001a 100644 --- a/modules/facet/src/examples/org/apache/lucene/facet/example/multiCL/MultiCLIndexer.java +++ b/modules/facet/src/examples/org/apache/lucene/facet/example/multiCL/MultiCLIndexer.java @@ -173,7 +173,7 @@ public class MultiCLIndexer { // create a plain Lucene document and add some regular Lucene fields // to it Document doc = new Document(); - doc.add(new Field(SimpleUtils.TITLE, TextField.TYPE_STORED, docTitles[docNum])); + doc.add(new Field(SimpleUtils.TITLE, docTitles[docNum], TextField.TYPE_STORED)); doc.add(new TextField(SimpleUtils.TEXT, docTexts[docNum])); // finally add the document to the index diff --git a/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleIndexer.java b/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleIndexer.java index 78b47208dde..117fa1386a7 100644 --- a/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleIndexer.java +++ b/modules/facet/src/examples/org/apache/lucene/facet/example/simple/SimpleIndexer.java @@ -70,7 +70,7 @@ public class SimpleIndexer { // create a plain Lucene document and add some regular Lucene fields to it Document doc = new Document(); - doc.add(new Field(SimpleUtils.TITLE, TextField.TYPE_STORED, SimpleUtils.docTitles[docNum])); + doc.add(new Field(SimpleUtils.TITLE, 
SimpleUtils.docTitles[docNum], TextField.TYPE_STORED)); doc.add(new TextField(SimpleUtils.TEXT, SimpleUtils.docTexts[docNum])); // invoke the category document builder for adding categories to the document and, diff --git a/modules/facet/src/java/org/apache/lucene/facet/index/CategoryDocumentBuilder.java b/modules/facet/src/java/org/apache/lucene/facet/index/CategoryDocumentBuilder.java index 2a21640bf0a..fb5c1831f43 100644 --- a/modules/facet/src/java/org/apache/lucene/facet/index/CategoryDocumentBuilder.java +++ b/modules/facet/src/java/org/apache/lucene/facet/index/CategoryDocumentBuilder.java @@ -187,7 +187,7 @@ public class CategoryDocumentBuilder implements DocumentBuilder { // super.build()) FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setOmitNorms(true); - fieldList.add(new Field(e.getKey(), ft, stream)); + fieldList.add(new Field(e.getKey(), stream, ft)); } return this; diff --git a/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java b/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java index c3a9831b270..2b87dde629a 100644 --- a/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java +++ b/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java @@ -181,8 +181,8 @@ public class LuceneTaxonomyWriter implements TaxonomyWriter { FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setOmitNorms(true); - parentStreamField = new Field(Consts.FIELD_PAYLOADS, ft, parentStream); - fullPathField = new Field(Consts.FULL, StringField.TYPE_STORED, ""); + parentStreamField = new Field(Consts.FIELD_PAYLOADS, parentStream, ft); + fullPathField = new Field(Consts.FULL, "", StringField.TYPE_STORED); this.nextID = indexWriter.maxDoc(); diff --git a/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java b/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java index bf711a40f0a..1e216837e5f 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java +++ b/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java @@ -245,7 +245,7 @@ public abstract class FacetTestBase extends LuceneTestCase { CategoryDocumentBuilder builder = new CategoryDocumentBuilder(tw, iParams); builder.setCategoryPaths(categories); builder.build(d); - d.add(new Field("content", TextField.TYPE_STORED, content)); + d.add(new Field("content", content, TextField.TYPE_STORED)); iw.addDocument(d); } diff --git a/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java b/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java index 0b12f332fd2..66fc7ba3295 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java +++ b/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java @@ -128,7 +128,7 @@ public class FacetTestUtils { cps.add(cp); Document d = new Document(); new CategoryDocumentBuilder(tw, iParams).setCategoryPaths(cps).build(d); - d.add(new Field("content", TextField.TYPE_STORED, "alpha")); + d.add(new Field("content", "alpha", TextField.TYPE_STORED)); iw.addDocument(d); } diff --git a/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java b/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java index 541651e25d0..d70d0eda98e 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java +++ 
b/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java @@ -328,7 +328,7 @@ public class TestTopKInEachNodeResultHandler extends LuceneTestCase { cps.add(cp); Document d = new Document(); new CategoryDocumentBuilder(tw, iParams).setCategoryPaths(cps).build(d); - d.add(new Field("content", TextField.TYPE_STORED, "alpha")); + d.add(new Field("content", "alpha", TextField.TYPE_STORED)); iw.addDocument(d); } diff --git a/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java b/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java index 870d62dbbe0..db16003ee43 100644 --- a/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java +++ b/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java @@ -210,7 +210,7 @@ public class TestScoredDocIDsUtils extends LuceneTestCase { // assert that those docs are not returned by all-scored-doc-IDs. FieldType ft = new FieldType(); ft.setStored(true); - doc.add(new Field("del", ft, Integer.toString(docNum))); + doc.add(new Field("del", Integer.toString(docNum), ft)); } if (haveAlpha(docNum)) { diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java index 6d384280b11..fdf100de981 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java @@ -45,51 +45,51 @@ public class TermAllGroupsCollectorTest extends LuceneTestCase { new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "random text")); - doc.add(new Field("id", customType, "1")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "1", customType)); w.addDocument(doc); // 1 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random text blob")); - doc.add(new Field("id", customType, "2")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random text blob", TextField.TYPE_STORED)); + doc.add(new Field("id", "2", customType)); w.addDocument(doc); // 2 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data")); - doc.add(new Field("id", customType, "3")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random textual data", TextField.TYPE_STORED)); + doc.add(new Field("id", "3", customType)); w.addDocument(doc); w.commit(); // To ensure a second segment // 3 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author2")); - doc.add(new Field("content", TextField.TYPE_STORED, "some random text")); - doc.add(new Field("id", customType, "4")); + doc.add(new Field(groupField, "author2", TextField.TYPE_STORED)); + doc.add(new Field("content", "some random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "4", customType)); w.addDocument(doc); // 4 doc = new 
Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author3")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random text")); - doc.add(new Field("id", customType, "5")); + doc.add(new Field(groupField, "author3", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "5", customType)); w.addDocument(doc); // 5 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author3")); - doc.add(new Field("content", TextField.TYPE_STORED, "random blob")); - doc.add(new Field("id", customType, "6")); + doc.add(new Field(groupField, "author3", TextField.TYPE_STORED)); + doc.add(new Field("content", "random blob", TextField.TYPE_STORED)); + doc.add(new Field("id", "6", customType)); w.addDocument(doc); // 6 -- no author field doc = new Document(); - doc.add(new Field("content", TextField.TYPE_STORED, "random word stuck in alot of other text")); - doc.add(new Field("id", customType, "6")); + doc.add(new Field("content", "random word stuck in alot of other text", TextField.TYPE_STORED)); + doc.add(new Field("id", "6", customType)); w.addDocument(doc); IndexSearcher indexSearcher = new IndexSearcher(w.getReader()); diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index 0c0f14dae10..aea14170fec 100644 --- a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -61,50 +61,50 @@ public class TestGrouping extends LuceneTestCase { new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "random text")); - doc.add(new Field("id", customType, "1")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "1", customType)); w.addDocument(doc); // 1 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random text")); - doc.add(new Field("id", customType, "2")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "2", customType)); w.addDocument(doc); // 2 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author1")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data")); - doc.add(new Field("id", customType, "3")); + doc.add(new Field(groupField, "author1", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random textual data", TextField.TYPE_STORED)); + doc.add(new Field("id", "3", customType)); w.addDocument(doc); // 3 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author2")); - doc.add(new Field("content", TextField.TYPE_STORED, "some random text")); - doc.add(new Field("id", customType, "4")); + doc.add(new Field(groupField, "author2", TextField.TYPE_STORED)); + doc.add(new Field("content", "some random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "4", customType)); w.addDocument(doc); // 4 doc = new Document(); - doc.add(new 
Field(groupField, TextField.TYPE_STORED, "author3")); - doc.add(new Field("content", TextField.TYPE_STORED, "some more random text")); - doc.add(new Field("id", customType, "5")); + doc.add(new Field(groupField, "author3", TextField.TYPE_STORED)); + doc.add(new Field("content", "some more random text", TextField.TYPE_STORED)); + doc.add(new Field("id", "5", customType)); w.addDocument(doc); // 5 doc = new Document(); - doc.add(new Field(groupField, TextField.TYPE_STORED, "author3")); - doc.add(new Field("content", TextField.TYPE_STORED, "random")); - doc.add(new Field("id", customType, "6")); + doc.add(new Field(groupField, "author3", TextField.TYPE_STORED)); + doc.add(new Field("content", "random", TextField.TYPE_STORED)); + doc.add(new Field("id", "6", customType)); w.addDocument(doc); // 6 -- no author field doc = new Document(); - doc.add(new Field("content", TextField.TYPE_STORED, "random word stuck in alot of other text")); - doc.add(new Field("id", customType, "6")); + doc.add(new Field("content", "random word stuck in alot of other text", TextField.TYPE_STORED)); + doc.add(new Field("id", "6", customType)); w.addDocument(doc); IndexSearcher indexSearcher = new IndexSearcher(w.getReader()); diff --git a/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index 60d5c0af3d2..253d82e6afa 100755 --- a/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -590,7 +590,7 @@ public class SpellChecker implements java.io.Closeable { Document doc = new Document(); // the word field is never queried on... its indexed so it can be quickly // checked for rebuild (and stored for retrieval). Doesn't need norms or TF/pos - Field f = new Field(F_WORD, StringField.TYPE_STORED, text); + Field f = new Field(F_WORD, text, StringField.TYPE_STORED); doc.add(f); // orig term addGram(text, doc, ng1, ng2); return doc; @@ -605,7 +605,7 @@ public class SpellChecker implements java.io.Closeable { String gram = text.substring(i, i + ng); FieldType ft = new FieldType(StringField.TYPE_UNSTORED); ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); - Field ngramField = new Field(key, ft, gram); + Field ngramField = new Field(key, gram, ft); // spellchecker does not use positional queries, but we want freqs // for scoring these multivalued n-gram fields. doc.add(ngramField); diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java index 814df41b988..05633d11e86 100644 --- a/solr/core/src/java/org/apache/solr/schema/FieldType.java +++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java @@ -273,7 +273,7 @@ public abstract class FieldType extends FieldProperties { * @return the {@link org.apache.lucene.index.IndexableField}. 
*/ protected IndexableField createField(String name, String val, org.apache.lucene.document.FieldType type, float boost){ - Field f = new Field(name, type, val); + Field f = new Field(name, val, type); f.setBoost(boost); return f; } diff --git a/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java b/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java index cff0b35e5f9..9ac9939c686 100644 --- a/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java +++ b/solr/core/src/test/org/apache/solr/core/TestArbitraryIndexDir.java @@ -106,8 +106,8 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{ new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40)) ); Document doc = new Document(); - doc.add(new Field("id", TextField.TYPE_STORED, "2")); - doc.add(new Field("name", TextField.TYPE_STORED, "name2")); + doc.add(new Field("id", "2", TextField.TYPE_STORED)); + doc.add(new Field("name", "name2", TextField.TYPE_STORED)); iw.addDocument(doc); iw.commit(); iw.close(); diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java index 8347e2432f7..feb6f40167b 100755 --- a/solr/core/src/test/org/apache/solr/search/TestSort.java +++ b/solr/core/src/test/org/apache/solr/search/TestSort.java @@ -150,8 +150,8 @@ public class TestSort extends SolrTestCaseJ4 { public void testSort() throws Exception { Directory dir = new RAMDirectory(); - Field f = new Field("f", StringField.TYPE_UNSTORED,"0"); - Field f2 = new Field("f2", StringField.TYPE_UNSTORED,"0"); + Field f = new Field("f", "0", StringField.TYPE_UNSTORED); + Field f2 = new Field("f2", "0", StringField.TYPE_UNSTORED); for (int iterCnt = 0; iterCnt
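
For reference, a minimal sketch of the argument order this patch introduces
(name, then value, then FieldType), written against the post-patch trunk API
shown above; the class name FieldOrderExample and the field names are
illustrative only:

  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field;
  import org.apache.lucene.document.FieldType;
  import org.apache.lucene.document.StringField;
  import org.apache.lucene.document.TextField;

  public class FieldOrderExample {
    public static void main(String[] args) {
      Document doc = new Document();

      // The value now precedes the type (was: new Field("id", StringField.TYPE_STORED, "42")).
      doc.add(new Field("id", "42", StringField.TYPE_STORED));
      doc.add(new Field("body", "some searchable text", TextField.TYPE_STORED));

      // Customized FieldTypes keep the same order: name, value, type.
      FieldType noNorms = new FieldType(TextField.TYPE_STORED);
      noNorms.setOmitNorms(true);
      doc.add(new Field("title", "a title indexed without norms", noNorms));
    }
  }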