LUCENE-2640: add LuceneTestCase[J4].newField

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@996268 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2010-09-12 09:02:09 +00:00
parent 98293eaf5a
commit c33d37d9cf
159 changed files with 735 additions and 636 deletions

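The hunks below replace direct new Field(...) construction in the tests with the new LuceneTestCase[J4].newField helpers. The helpers themselves are not part of this diff; the following is a minimal, hypothetical sketch of the idea, assuming newField builds the field the test asked for but may randomly upgrade its options (for example storing the value or adding term vectors) using the test's random seed, so the same tests cover more index code paths. The class name and the exact randomization policy here are illustrative assumptions, not the actual implementation.

import java.util.Random;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;

// Hypothetical sketch only; the real helper lives in LuceneTestCase/LuceneTestCaseJ4.
public final class NewFieldSketch {

  // Mirrors the five-argument calls seen in the hunks below.
  public static Field newField(Random random, String name, String value,
                               Store store, Index index, TermVector tv) {
    // Occasionally store a value the test did not ask to store,
    // so stored-field code paths also get exercised.
    if (store == Store.NO && random.nextInt(4) == 0) {
      store = Store.YES;
    }
    // Occasionally add term vectors to an indexed field.
    if (index != Index.NO && tv == TermVector.NO && random.nextInt(4) == 0) {
      tv = TermVector.WITH_POSITIONS_OFFSETS;
    }
    return new Field(name, value, store, index, tv);
  }

  // Mirrors the four-argument calls (no explicit term vector setting).
  public static Field newField(Random random, String name, String value,
                               Store store, Index index) {
    return newField(random, name, value, store, index, TermVector.NO);
  }
}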

@ -108,8 +108,8 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
IndexWriter writer = new IndexWriter(ramdir,
new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
Document doc = new Document();
Field field1 = new Field("foo", fooField.toString(), Field.Store.NO, Field.Index.ANALYZED);
Field field2 = new Field("term", termField.toString(), Field.Store.NO, Field.Index.ANALYZED);
Field field1 = newField("foo", fooField.toString(), Field.Store.NO, Field.Index.ANALYZED);
Field field2 = newField("term", termField.toString(), Field.Store.NO, Field.Index.ANALYZED);
doc.add(field1);
doc.add(field2);
writer.addDocument(doc);


@ -62,13 +62,13 @@ public class TestFieldNormModifier extends LuceneTestCase {
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
d.add(new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
d.add(new Field("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("field", "word", Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
d.add(newField("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED));
for (int j = 1; j <= i; j++) {
d.add(new Field("field", "crap", Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
d.add(newField("field", "crap", Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);
}


@ -36,8 +36,8 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
doc = new Document();
doc.add(new Field("id", i + "", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("f", i + " " + i, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("id", i + "", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("f", i + " " + i, Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
}
w.close();


@ -30,33 +30,33 @@ public class TestTermVectorAccessor extends LuceneTestCase {
Document doc;
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
iw.close();


@ -143,7 +143,7 @@ public class TestAppendingCodec extends LuceneTestCase {
((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundDocStore(false);
IndexWriter writer = new IndexWriter(dir, cfg);
Document doc = new Document();
doc.add(new Field("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
writer.addDocument(doc);


@ -189,7 +189,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
}
/********************Testing Utils**********************************/
private static void indexDocs(IndexWriter writer) throws Exception {
private void indexDocs(IndexWriter writer) throws Exception {
/**
* Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared).
@ -198,9 +198,9 @@ public class TestHighFreqTerms extends LuceneTestCase {
Document doc = new Document();
String content = getContent(i);
doc.add(new Field("FIELD_1", content, Field.Store.YES,Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("FIELD_1", content, Field.Store.YES,Field.Index.ANALYZED, Field.TermVector.NO));
//add a different field
doc.add(new Field("different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(doc);
}
@ -208,7 +208,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
//highest freq terms for a specific field.
for (int i = 1; i <= 10; i++) {
Document doc = new Document();
doc.add(new Field("different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(doc);
}
// add some docs where tf < df so we can see if sorting works
@ -219,7 +219,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
for (int i = 0; i < highTF; i++) {
content += "highTF ";
}
doc.add(new Field("FIELD_1", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("FIELD_1", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(doc);
// highTF medium df =5
int medium_df = 5;
@ -230,7 +230,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
for (int j = 0; j < tf; j++) {
newcontent += "highTFmedDF ";
}
newdoc.add(new Field("FIELD_1", newcontent, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
newdoc.add(newField("FIELD_1", newcontent, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(newdoc);
}
// add a doc with high tf in field different_field
@ -240,7 +240,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
for (int i = 0; i < targetTF; i++) {
content += "TF150 ";
}
doc.add(new Field("different_field", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("different_field", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(doc);
writer.close();


@ -65,15 +65,15 @@ public class TestLengthNormModifier extends LuceneTestCase {
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
d.add(new Field("field", "word",
d.add(newField("field", "word",
Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "word",
d.add(newField("nonorm", "word",
Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
for (int j = 1; j <= i; j++) {
d.add(new Field("field", "crap",
d.add(newField("field", "crap",
Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "more words",
d.add(newField("nonorm", "more words",
Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);


@ -62,10 +62,10 @@ public class BooleanFilterTest extends LuceneTestCase {
private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
{
Document doc=new Document();
doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("price",price,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("price",price,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -63,9 +63,9 @@ public class ChainedFilterTest extends LuceneTestCase {
for (int i = 0; i < MAX; i++) {
Document doc = new Document();
doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
cal.add(Calendar.DATE, 1);


@ -75,9 +75,9 @@ public class DuplicateFilterTest extends LuceneTestCase {
private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException
{
Document doc=new Document();
doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
doc.add(new Field("text",text,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
doc.add(newField("text",text,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -65,8 +65,8 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private void addDoc(RandomIndexWriter writer, String name, String id) throws IOException
{
Document doc=new Document();
doc.add(new Field("name",name,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("id",id,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("name",name,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("id",id,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -57,7 +57,7 @@ public class TermsFilterTest extends LuceneTestCase {
for (int i = 0; i < 100; i++) {
Document doc=new Document();
int term=i*10; //terms are units of 10;
doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
doc.add(newField(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
IndexReader mainReader = w.getReader();


@ -43,7 +43,7 @@ public class TestRegexQuery extends LuceneTestCase {
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory);
Document doc = new Document();
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
reader = writer.getReader();
writer.close();


@ -59,15 +59,15 @@ public class TestSpanRegexQuery extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog",
// doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
// Field.Store.NO, Field.Index.ANALYZED));
// writer.addDocument(doc);
// doc = new Document();
doc.add(new Field("field", "auto update", Field.Store.NO,
doc.add(newField("field", "auto update", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "first auto update", Field.Store.NO,
doc.add(newField("field", "first auto update", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
@ -115,12 +115,12 @@ public class TestSpanRegexQuery extends LuceneTestCase {
LockObtainFailedException, IOException {
// creating a document to store
Document lDoc = new Document();
lDoc.add(new Field("field", "a1 b1", Field.Store.NO,
lDoc.add(newField("field", "a1 b1", Field.Store.NO,
Field.Index.ANALYZED_NO_NORMS));
// creating a document to store
Document lDoc2 = new Document();
lDoc2.add(new Field("field", "a2 b2", Field.Store.NO,
lDoc2.add(newField("field", "a2 b2", Field.Store.NO,
Field.Index.ANALYZED_NO_NORMS));
// creating first index writer


@ -66,7 +66,7 @@ public class TestMoreLikeThis extends LuceneTestCase {
private void addDoc(RandomIndexWriter writer, String text) throws IOException {
Document doc = new Document();
doc.add(new Field("text", text, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("text", text, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -115,9 +115,9 @@ public class TestComplexPhraseQuery extends LuceneTestCase {
IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
for (int i = 0; i < docsContent.length; i++) {
Document doc = new Document();
doc.add(new Field("name", docsContent[i].name, Field.Store.YES,
doc.add(newField("name", docsContent[i].name, Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("id", docsContent[i].id, Field.Store.YES,
doc.add(newField("id", docsContent[i].id, Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
}


@ -320,7 +320,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("body", "blah the footest blah", Field.Store.NO,
doc.add(newField("body", "blah the footest blah", Field.Store.NO,
Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();


@ -324,7 +324,7 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("body", "blah the footest blah", Field.Store.NO,
doc.add(newField("body", "blah the footest blah", Field.Store.NO,
Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();


@ -656,7 +656,7 @@ public class TestQPHelper extends LocalizedTestCase {
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
iw.addDocument(doc);
iw.close();
@ -1250,13 +1250,13 @@ public class TestQPHelper extends LocalizedTestCase {
assertEquals(expected, hits.length);
}
private static void addDateDoc(String content, int year, int month, int day,
private void addDateDoc(String content, int year, int month, int day,
int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance(Locale.ENGLISH);
cal.set(year, month - 1, day, hour, minute, second);
d.add(new Field("date", DateField.dateToString(cal.getTime()),
d.add(newField("date", DateField.dateToString(cal.getTime()),
Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}
@ -1306,7 +1306,7 @@ public class TestQPHelper extends LocalizedTestCase {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);


@ -658,7 +658,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
IndexWriter iw = new IndexWriter(ramDir, new MockAnalyzer(MockTokenizer.WHITESPACE, false), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
doc.add(newField("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
iw.addDocument(doc);
iw.close();
@ -1201,13 +1201,13 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertEquals(expected, hits.length);
}
private static void addDateDoc(String content, int year, int month, int day,
private void addDateDoc(String content, int year, int month, int day,
int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance(Locale.ENGLISH);
cal.set(year, month - 1, day, hour, minute, second);
d.add(new Field("date", DateField.dateToString(cal.getTime()),
d.add(newField("date", DateField.dateToString(cal.getTime()),
Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}


@ -46,15 +46,15 @@ public class TestRemoteCachingWrapperFilter extends RemoteTestCaseJ4 {
IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(random,
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("type", "A", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "type", "A", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
//Need a second document to search for
doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("type", "B", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "type", "B", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();


@ -45,8 +45,8 @@ public class TestRemoteSearchable extends RemoteTestCaseJ4 {
IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(random,
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(random, "other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();

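The two remote-search tests above pass the suite's random to newField explicitly, while most other hunks use the overloads without a Random argument; a few formerly static helpers (indexDocs, addDateDoc, addDoc) also lose their static modifier in this commit so they can use those overloads. Below is a hypothetical usage sketch of both calling styles, assuming a test class built on LuceneTestCase with the usual test-framework helpers on the classpath; the class and test names are illustrative.

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

// Hypothetical usage sketch, not part of this commit.
public class NewFieldUsageSketch extends LuceneTestCase {

  public void testBothCallingStyles() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer()));
    Document doc = new Document();
    // Plain overload, used from ordinary (non-static) test code.
    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    // Explicit-Random overload, as the RemoteTestCaseJ4 setups above use it.
    doc.add(newField(random, "id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.close();
    dir.close();
  }
}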

@ -98,14 +98,14 @@ public class TestCartesian extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("name", name,Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED));
// convert the lat / long to lucene fields
doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat));
doc.add(new NumericField(lngField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lng));
// add a default meta field to make searching all documents easy
doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
int ctpsize = ctps.size();
for (int i =0; i < ctpsize; i++){
@ -114,7 +114,7 @@ public class TestCartesian extends LuceneTestCase {
Field.Store.YES,
true).setDoubleValue(ctp.getTierBoxId(lat,lng)));
doc.add(new Field(geoHashPrefix, GeoHashUtils.encode(lat,lng),
doc.add(newField(geoHashPrefix, GeoHashUtils.encode(lat,lng),
Field.Store.YES,
Field.Index.NOT_ANALYZED_NO_NORMS));
}


@ -60,14 +60,14 @@ public class TestDistance extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("name", name,Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED));
// convert the lat / long to lucene fields
doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat));
doc.add(new NumericField(lngField, Integer.MAX_VALUE,Field.Store.YES, true).setDoubleValue(lng));
// add a default meta field to make searching all documents easy
doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -51,11 +51,11 @@ public class TestLuceneDictionary extends LuceneTestCase {
Document doc;
doc = new Document();
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
@ -67,7 +67,7 @@ public class TestLuceneDictionary extends LuceneTestCase {
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();


@ -58,9 +58,9 @@ public class TestSpellChecker extends LuceneTestCase {
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(new Field("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
doc.add(new Field("field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
doc.add(newField("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
doc.add(newField("field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
writer.addDocument(doc);
}
writer.close();


@ -66,8 +66,8 @@ public class TestParser extends LuceneTestCase {
String date=line.substring(0,endOfDate).trim();
String content=line.substring(endOfDate).trim();
org.apache.lucene.document.Document doc =new org.apache.lucene.document.Document();
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("contents",content,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField("contents",content,Field.Store.YES,Field.Index.ANALYZED));
NumericField numericField = new NumericField("date2");
numericField.setIntValue(Integer.valueOf(date));
doc.add(numericField);


@ -126,7 +126,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
if(st.hasMoreTokens())
{
String value=st.nextToken().trim();
result.add(new Field(name,value,Field.Store.YES,Field.Index.ANALYZED));
result.add(newField(name,value,Field.Store.YES,Field.Index.ANALYZED));
}
}
return result;


@ -57,7 +57,7 @@ public class TestDemo extends LuceneTestCase {
Document doc = new Document();
String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
String text = "This is the text to be indexed. " + longTerm;
doc.add(new Field("fieldname", text, Field.Store.YES,
doc.add(newField("fieldname", text, Field.Store.YES,
Field.Index.ANALYZED));
iwriter.addDocument(doc);
iwriter.close();


@ -607,11 +607,11 @@ public class TestExternalCodecs extends LuceneTestCase {
w.setMergeFactor(3);
Document doc = new Document();
// uses default codec:
doc.add(new Field("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
// uses pulsing codec:
doc.add(new Field("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED));
Field idField = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int i=0;i<NUM_DOCS;i++) {
idField.setValue(""+i);


@ -86,7 +86,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
dir.failOn(new FailOnlyOnMerge());
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(


@ -91,7 +91,7 @@ public class TestSearch extends LuceneTestCase {
};
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();


@ -89,11 +89,11 @@ public class TestSearchForDuplicates extends LuceneTestCase {
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
// NOTE: this ID_FIELD produces no tokens since
// MockAnalyzer discards numbers
d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();


@ -150,8 +150,8 @@ public class TestAddIndexes extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO,
doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("content", "bbb " + i, Field.Store.NO,
Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
@ -186,8 +186,8 @@ public class TestAddIndexes extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
@ -224,8 +224,8 @@ public class TestAddIndexes extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO,
doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("content", "bbb " + i, Field.Store.NO,
Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
@ -456,7 +456,7 @@ public class TestAddIndexes extends LuceneTestCase {
private void addDocs(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO,
doc.add(newField("content", "aaa", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -465,7 +465,7 @@ public class TestAddIndexes extends LuceneTestCase {
private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new Field("content", "bbb", Field.Store.NO,
doc.add(newField("content", "bbb", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -530,19 +530,19 @@ public class TestAddIndexes extends LuceneTestCase {
.setMaxBufferedDocs(5).setMergePolicy(lmp));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<60;i++)
writer.addDocument(doc);
Document doc2 = new Document();
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
for(int i=0;i<10;i++)
writer.addDocument(doc2);


@ -135,8 +135,8 @@ public class TestAtomicUpdate extends LuceneTestCase {
// Establish a base index of 100 docs:
for(int i=0;i<100;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
if ((i-1)%7 == 0) {
writer.commit();
}


@ -479,7 +479,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundDocStore(doCFS);
IndexWriter writer = new IndexWriter(dir, conf);
@ -491,7 +491,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.close();
// open fresh writer so we get no prx file in the added segment
conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS);
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundDocStore(doCFS);
writer = new IndexWriter(dir, conf);


@ -36,7 +36,7 @@ public class TestCheckIndex extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++) {
writer.addDocument(doc);
}


@ -355,7 +355,7 @@ public class TestCodecs extends MultiCodecTestCase {
pq.add(new Term("content", "ccc"));
final Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd", Store.NO, Field.Index.ANALYZED_NO_NORMS));
doc.add(newField("content", "aaa bbb ccc ddd", Store.NO, Field.Index.ANALYZED_NO_NORMS));
// add document and force commit for creating a first segment
writer.addDocument(doc);


@ -65,7 +65,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
int extraCount = 0;
@ -116,7 +116,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
.setMergePolicy(mp));
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int i=0;i<10;i++) {
for(int j=0;j<100;j++) {
@ -151,7 +151,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
for(int j=0;j<21;j++) {
Document doc = new Document();
doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -172,7 +172,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
public void testNoWaitClose() throws IOException {
MockDirectoryWrapper directory = newDirectory();
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));


@ -44,8 +44,8 @@ public class TestCrash extends LuceneTestCase {
}
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("id", "0", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<157;i++)
writer.addDocument(doc);


@ -795,7 +795,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
private void addDoc(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}


@ -206,7 +206,7 @@ public class TestDirectoryReader extends LuceneTestCase {
new MockAnalyzer()).setOpenMode(
create ? OpenMode.CREATE : OpenMode.APPEND));
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();
}


@ -127,8 +127,8 @@ public class TestDocumentWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.commit();
@ -192,7 +192,7 @@ public class TestDocumentWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.commit();
@ -269,11 +269,11 @@ public class TestDocumentWriter extends LuceneTestCase {
public void testMixedTermVectorSettingsSameField() throws Exception {
Document doc = new Document();
// f1 first without tv then with tv
doc.add(new Field("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
doc.add(new Field("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
doc.add(newField("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// f2 first with tv then without tv
doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
doc.add(newField("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
@ -302,13 +302,13 @@ public class TestDocumentWriter extends LuceneTestCase {
public void testLUCENE_1590() throws Exception {
Document doc = new Document();
// f1 has no norms
doc.add(new Field("f1", "v1", Store.NO, Index.ANALYZED_NO_NORMS));
doc.add(new Field("f1", "v2", Store.YES, Index.NO));
doc.add(newField("f1", "v1", Store.NO, Index.ANALYZED_NO_NORMS));
doc.add(newField("f1", "v2", Store.YES, Index.NO));
// f2 has no TF
Field f = new Field("f2", "v1", Store.NO, Index.ANALYZED);
Field f = newField("f2", "v1", Store.NO, Index.ANALYZED);
f.setOmitTermFreqAndPositions(true);
doc.add(f);
doc.add(new Field("f2", "v2", Store.YES, Index.NO));
doc.add(newField("f2", "v2", Store.YES, Index.NO));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));


@ -133,15 +133,15 @@ public class TestFilterIndexReader extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d1 = new Document();
d1.add(new Field("default","one two", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("default","one two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("default","one three", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("default","one three", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d2);
Document d3 = new Document();
d3.add(new Field("default","two four", Field.Store.YES, Field.Index.ANALYZED));
d3.add(newField("default","two four", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d3);
writer.close();


@ -37,10 +37,10 @@ public class TestFlex extends LuceneTestCase {
if (iter == 0) {
w.setMaxBufferedDocs(7);
Document doc = new Document();
doc.add(new Field("field1", "this is field1", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("field2", "this is field2", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("field3", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("field4", "bbb", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field1", "this is field1", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field2", "this is field2", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field3", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field4", "bbb", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i<DOC_COUNT;i++) {
w.addDocument(doc);
}
@ -64,7 +64,7 @@ public class TestFlex extends LuceneTestCase {
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
Document doc = new Document();
doc.add(new Field("f", "a b c", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("f", "a b c", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
TermsEnum terms = r.getSequentialSubReaders()[0].fields().terms("f").iterator();


@ -219,8 +219,8 @@ public class TestIndexFileDeleter extends LuceneTestCase {
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
}


@ -163,7 +163,14 @@ public class TestIndexReader extends LuceneTestCase
// set up writer
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
addDocumentWithFields(writer);
Document doc = new Document();
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
// set up reader
IndexReader reader = IndexReader.open(d, false);
@ -179,15 +186,31 @@ public class TestIndexReader extends LuceneTestCase
// want to get some more segments here
int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
for (int i = 0; i < 5*mergeFactor; i++) {
addDocumentWithFields(writer);
doc = new Document();
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// new fields are in some different segments (we hope)
for (int i = 0; i < 5*mergeFactor; i++) {
addDocumentWithDifferentFields(writer);
doc = new Document();
doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
// new termvector fields
for (int i = 0; i < 5*mergeFactor; i++) {
addDocumentWithTermVectorFields(writer);
doc = new Document();
doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
writer.close();
@ -869,8 +892,8 @@ public class TestIndexReader extends LuceneTestCase
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for(int i=0;i<157;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
if (0==i%10)
writer.commit();
@ -1137,11 +1160,11 @@ public class TestIndexReader extends LuceneTestCase
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random, dir);
Document doc = new Document();
doc.add(new Field("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
doc = new Document();
w.commit();
doc.add(new Field("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexReader wr = SlowMultiReaderWrapper.wrap(r);
@ -1267,31 +1290,31 @@ public class TestIndexReader extends LuceneTestCase
private void addDocumentWithFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("text","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("unindexed","test1", Field.Store.YES, Field.Index.NO));
doc.add(newField("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("unindexed2","test1", Field.Store.YES, Field.Index.NO));
doc.add(newField("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(newField("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
@ -1299,7 +1322,7 @@ public class TestIndexReader extends LuceneTestCase
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void rmDir(File dir) {
@ -1558,7 +1581,7 @@ public class TestIndexReader extends LuceneTestCase
private Document createDocument(String id) {
Document doc = new Document();
doc.add(new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
return doc;
}
@ -1608,7 +1631,7 @@ public class TestIndexReader extends LuceneTestCase
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
writer.close();
@ -1639,7 +1662,7 @@ public class TestIndexReader extends LuceneTestCase
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
writer.addDocument(doc);
writer.commit();
@ -1673,7 +1696,7 @@ public class TestIndexReader extends LuceneTestCase
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(-1));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
writer.commit();
@ -1714,8 +1737,8 @@ public class TestIndexReader extends LuceneTestCase
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
Document doc = new Document();
doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.addDocument(doc);
writer.commit();
@ -1747,8 +1770,8 @@ public class TestIndexReader extends LuceneTestCase
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
Document doc = new Document();
doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.addDocument(doc);
writer.close();


@ -495,7 +495,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
Document doc = new Document();
doc.add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
IndexReader r1 = IndexReader.open(dir, false);


@ -305,7 +305,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
Document d = new Document();
float boost = nextNorm();
for (int i = 0; i < 10; i++) {
Field f = new Field("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED);
Field f = newField("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED);
f.setBoost(boost);
d.add(f);
}


@ -181,9 +181,9 @@ public class TestIndexReaderReopen extends LuceneTestCase {
for (int i=0; i<4; i++) {
for (int j=0; j<M; j++) {
Document doc = new Document();
doc.add(new Field("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED));
doc.add(new Field("id2", i+"_"+j, Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("id3", i+"_"+j, Store.YES, Index.NO));
doc.add(newField("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED));
doc.add(newField("id2", i+"_"+j, Store.YES, Index.NOT_ANALYZED_NO_NORMS));
doc.add(newField("id3", i+"_"+j, Store.YES, Index.NO));
iwriter.addDocument(doc);
if (i>0) {
int k = i-1;
@ -1196,7 +1196,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
for(int i=0;i<4;i++) {
Document doc = new Document();
doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
Map<String,String> data = new HashMap<String,String>();
data.put("index", i+"");


@ -148,18 +148,18 @@ public class TestIndexWriter extends LuceneTestCase {
dir.close();
}
private static void addDoc(IndexWriter writer) throws IOException
private void addDoc(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -555,7 +555,7 @@ public class TestIndexWriter extends LuceneTestCase {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
LogDocMergePolicy ldmp = new LogDocMergePolicy();
@ -596,7 +596,7 @@ public class TestIndexWriter extends LuceneTestCase {
MockDirectoryWrapper dir = newDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
LogDocMergePolicy ldmp = new LogDocMergePolicy();
ldmp.setMinMergeDocs(1);
@ -1137,12 +1137,12 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
@ -1169,7 +1169,7 @@ public class TestIndexWriter extends LuceneTestCase {
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.listAll().length;
// Verify that with a tiny RAM buffer we see new
@ -1198,7 +1198,7 @@ public class TestIndexWriter extends LuceneTestCase {
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
@ -1255,7 +1255,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -1315,7 +1315,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(new Field("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
@ -1324,7 +1324,7 @@ public class TestIndexWriter extends LuceneTestCase {
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -1339,7 +1339,7 @@ public class TestIndexWriter extends LuceneTestCase {
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
@ -1359,7 +1359,7 @@ public class TestIndexWriter extends LuceneTestCase {
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
@ -1380,7 +1380,7 @@ public class TestIndexWriter extends LuceneTestCase {
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
@ -1412,7 +1412,7 @@ public class TestIndexWriter extends LuceneTestCase {
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
@ -1477,7 +1477,7 @@ public class TestIndexWriter extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
writer.flush(false, true, true);
@ -1496,7 +1496,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
writer.addDocument(new Document());
@ -1520,7 +1520,7 @@ public class TestIndexWriter extends LuceneTestCase {
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<200;i++)
writer.addDocument(doc);
writer.optimize(false);
@ -1573,7 +1573,7 @@ public class TestIndexWriter extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
iw.addDocument(document);
iw.close();
dir.close();
@ -1585,17 +1585,17 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.commit();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@ -1612,13 +1612,13 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.commit();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
@ -1626,7 +1626,7 @@ public class TestIndexWriter extends LuceneTestCase {
iw.optimize();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@ -1648,7 +1648,7 @@ public class TestIndexWriter extends LuceneTestCase {
((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
for(int i=0;i<4;i++)
@ -1692,7 +1692,7 @@ public class TestIndexWriter extends LuceneTestCase {
lmp.setMergeFactor(2);
IndexWriter iw = new IndexWriter(dir, conf);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
@ -1725,7 +1725,7 @@ public class TestIndexWriter extends LuceneTestCase {
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
doc.add(newField("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
@ -1735,13 +1735,13 @@ public class TestIndexWriter extends LuceneTestCase {
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(doc);
@ -1813,7 +1813,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
doc.add(newField("content", contents, Field.Store.NO,
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
@ -1869,13 +1869,13 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
doc.add(newField("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
@ -1885,7 +1885,7 @@ public class TestIndexWriter extends LuceneTestCase {
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
@ -1914,7 +1914,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
analyzer).setMaxBufferedDocs(10));
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
@ -1965,13 +1965,13 @@ public class TestIndexWriter extends LuceneTestCase {
try {
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
doc.add(newField("other", "this will not get indexed", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
@ -1981,7 +1981,7 @@ public class TestIndexWriter extends LuceneTestCase {
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
@ -2027,7 +2027,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
doc.add(newField("contents", "here are some contents", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
@ -2063,21 +2063,21 @@ public class TestIndexWriter extends LuceneTestCase {
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
doc.add(newField("content3", "", Field.Store.NO,
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
doc.add(newField("content4", contents, Field.Store.YES,
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
doc.add(newField("content1", contents, storeVal,
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
doc.add(newField("content3", "", Field.Store.YES,
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
doc.add(newField("content5", "", storeVal,
Field.Index.ANALYZED));
}
@ -2105,7 +2105,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory directory = newDirectory();
final Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<2;pass++) {
@ -2206,7 +2206,7 @@ public class TestIndexWriter extends LuceneTestCase {
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
@ -2322,7 +2322,7 @@ public class TestIndexWriter extends LuceneTestCase {
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
@ -2419,7 +2419,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
.setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<6;i++)
writer.addDocument(doc);
@ -2614,7 +2614,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@ -2765,7 +2765,7 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
@ -2773,7 +2773,7 @@ public class TestIndexWriter extends LuceneTestCase {
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
@ -2816,7 +2816,7 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
writer.addDocument(document);
@ -2824,7 +2824,7 @@ public class TestIndexWriter extends LuceneTestCase {
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@ -2853,10 +2853,10 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@ -2896,7 +2896,7 @@ public class TestIndexWriter extends LuceneTestCase {
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@ -2919,10 +2919,10 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@ -2963,10 +2963,10 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@ -3008,10 +3008,10 @@ public class TestIndexWriter extends LuceneTestCase {
Document document = new Document();
document = new Document();
Field storedField = new Field("stored", "stored", Field.Store.YES,
Field storedField = newField("stored", "stored", Field.Store.YES,
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field termVectorField = newField("termVector", "termVector",
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@ -3044,7 +3044,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
dir.close();
@ -3072,7 +3072,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.doFail = true;
@ -3092,7 +3092,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
@ -3104,7 +3104,7 @@ public class TestIndexWriter extends LuceneTestCase {
};
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
@ -3146,7 +3146,7 @@ public class TestIndexWriter extends LuceneTestCase {
MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
w.doFail = true;
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
for(int i=0;i<10;i++)
try {
@ -3187,7 +3187,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
MockIndexWriter3 w = new MockIndexWriter3(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
@ -3244,7 +3244,7 @@ public class TestIndexWriter extends LuceneTestCase {
FailOnlyInCommit failure = new FailOnlyInCommit();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
doc.add(newField("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
w.addDocument(doc);
dir.failOn(failure);
@ -3295,7 +3295,7 @@ public class TestIndexWriter extends LuceneTestCase {
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
@ -3652,7 +3652,7 @@ public class TestIndexWriter extends LuceneTestCase {
dir.close();
}
private abstract static class RunAddIndexesThreads {
private abstract class RunAddIndexesThreads {
Directory dir, dir2;
final static int NUM_INIT_DOCS = 17;
@ -4094,10 +4094,10 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f = newField("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
Field f2 = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
doc.add(f);
w.addDocument(doc);
@ -4129,7 +4129,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f = newField("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
@ -4151,7 +4151,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f = newField("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
@ -4198,7 +4198,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f = newField("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f);
w.addDocument(doc);
@ -4221,9 +4221,9 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd the ", Field.Store.NO,
Field f = newField("field", "abcd the ", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
@ -4252,9 +4252,9 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "", Field.Store.NO,
Field f = newField("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f2 = new Field("field", "crunch man", Field.Store.NO,
Field f2 = newField("field", "crunch man", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(f2);
@ -4281,12 +4281,12 @@ public class TestIndexWriter extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO,
Field f = newField("field", "abcd", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field f2 = new Field("field", "crunch", Field.Store.NO,
Field f2 = newField("field", "crunch", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f2);
@ -4333,7 +4333,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
@ -4386,7 +4386,7 @@ public class TestIndexWriter extends LuceneTestCase {
w = new IndexWriter(dir, conf);
Document doc = new Document();
doc.add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<100;i++) {
w.addDocument(doc);
if (i%10 == 0) {
@ -4495,7 +4495,7 @@ public class TestIndexWriter extends LuceneTestCase {
Document doc = new Document();
Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
doc.add(f);
doc.add(f2);
@ -4552,9 +4552,9 @@ public class TestIndexWriter extends LuceneTestCase {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
doc = r.document(0);
@ -4584,10 +4584,10 @@ public class TestIndexWriter extends LuceneTestCase {
Directory d = newDirectory();
IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "a", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
@ -4634,7 +4634,7 @@ public class TestIndexWriter extends LuceneTestCase {
try {
final Document doc = new Document();
IndexReader r = IndexReader.open(dir);
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
int count = 0;
do {
@ -4731,7 +4731,7 @@ public class TestIndexWriter extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
Document d = new Document();
// Single segment
Field f = new Field("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
d.add(f);
char[] chars = new char[2];
final Set<String> allTerms = new HashSet<String>();
@ -4799,7 +4799,7 @@ public class TestIndexWriter extends LuceneTestCase {
s.append(' ').append(""+i);
}
Document d = new Document();
Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
w.addDocument(d);
IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
@ -4824,7 +4824,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
((LogMergePolicy) w.getMergePolicy()).setUseCompoundFile(true);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r;
if (iter == 0) {
@ -4887,7 +4887,7 @@ public class TestIndexWriter extends LuceneTestCase {
// First commit
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(1, IndexReader.listCommits(dir).size());
@ -4897,7 +4897,7 @@ public class TestIndexWriter extends LuceneTestCase {
// Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.commit();
assertEquals(2, IndexReader.listCommits(dir).size());
@ -4928,7 +4928,7 @@ public class TestIndexWriter extends LuceneTestCase {
FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
//w.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
int num = 6 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < num; iter++) {
int count = 0;
@ -5004,13 +5004,13 @@ public class TestIndexWriter extends LuceneTestCase {
Document doc = new Document();
// create as many files as possible
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Adding just one document does not call flush yet.
assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// The second document should cause a flush.
assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
@ -5034,7 +5034,7 @@ public class TestIndexWriter extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
Document doc = new Document();
doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
w.addDocument(doc);
w.addDocument(doc);
IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
@ -5140,7 +5140,7 @@ public class TestIndexWriter extends LuceneTestCase {
final List<Integer> fieldIDs = new ArrayList<Integer>();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
for(int i=0;i<fieldCount;i++) {
fieldIDs.add(i);
@ -5163,7 +5163,7 @@ public class TestIndexWriter extends LuceneTestCase {
final String s;
if (rand.nextInt(4) != 3) {
s = _TestUtil.randomUnicodeString(rand, 1000);
doc.add(new Field("f"+field, s, Field.Store.YES, Field.Index.NO));
doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
} else {
s = null;
}
@ -5248,7 +5248,7 @@ public class TestIndexWriter extends LuceneTestCase {
((LogMergePolicy) w.getMergePolicy()).setMergeFactor(2);
Document doc = new Document();
doc.add(new Field("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();


@ -48,14 +48,14 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
doc.add(newField("id", keywords[i], Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
doc.add(newField("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
doc.add(newField("contents", unstored[i], Field.Store.NO,
Field.Index.ANALYZED));
doc
.add(new Field("city", text[i], Field.Store.YES,
.add(newField("city", text[i], Field.Store.YES,
Field.Index.ANALYZED));
modifier.addDocument(doc);
}
@ -370,10 +370,10 @@ public class TestIndexWriterDelete extends LuceneTestCase {
private void updateDoc(IndexWriter modifier, int id, int value)
throws IOException {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", String.valueOf(id), Field.Store.YES,
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("id", String.valueOf(id), Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("value", String.valueOf(value), Field.Store.NO,
doc.add(newField("value", String.valueOf(value), Field.Store.NO,
Field.Index.NOT_ANALYZED));
modifier.updateDocument(new Term("id", String.valueOf(id)), doc);
}
@ -382,10 +382,10 @@ public class TestIndexWriterDelete extends LuceneTestCase {
private void addDoc(IndexWriter modifier, int id, int value)
throws IOException {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", String.valueOf(id), Field.Store.YES,
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("id", String.valueOf(id), Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("value", String.valueOf(value), Field.Store.NO,
doc.add(newField("value", String.valueOf(value), Field.Store.NO,
Field.Index.NOT_ANALYZED));
modifier.addDocument(doc);
}
@ -422,9 +422,9 @@ public class TestIndexWriterDelete extends LuceneTestCase {
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
for (int i = 0; i < 157; i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES,
d.add(newField("id", Integer.toString(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));
d.add(new Field("content", "aaa " + i, Field.Store.NO,
d.add(newField("content", "aaa " + i, Field.Store.NO,
Field.Index.ANALYZED));
writer.addDocument(d);
}
@ -496,9 +496,9 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < 13; i++) {
if (updates) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES,
d.add(newField("id", Integer.toString(i), Field.Store.YES,
Field.Index.NOT_ANALYZED));
d.add(new Field("content", "bbb " + i, Field.Store.NO,
d.add(newField("content", "bbb " + i, Field.Store.NO,
Field.Index.ANALYZED));
modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
} else { // deletes
@ -666,13 +666,13 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
doc.add(newField("id", keywords[i], Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
doc.add(newField("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
doc.add(newField("contents", unstored[i], Field.Store.NO,
Field.Index.ANALYZED));
doc.add(new Field("city", text[i], Field.Store.YES,
doc.add(newField("city", text[i], Field.Store.YES,
Field.Index.ANALYZED));
modifier.addDocument(doc);
}
@ -772,13 +772,13 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
doc.add(newField("id", keywords[i], Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
doc.add(newField("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
doc.add(newField("contents", unstored[i], Field.Store.NO,
Field.Index.ANALYZED));
doc.add(new Field("city", text[i], Field.Store.YES,
doc.add(newField("city", text[i], Field.Store.YES,
Field.Index.ANALYZED));
try {
modifier.addDocument(doc);


@ -47,17 +47,17 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
final Document doc = new Document();
doc.add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
doc.add(newField("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
doc.add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
final Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
final long stopTime = System.currentTimeMillis() + 500;


@ -214,7 +214,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
private void addDoc(IndexWriter writer) throws IOException {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -103,7 +103,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
for (int i = start; i < (start + numDocs); i++)
{
Document temp = new Document();
temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
temp.add(newField("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(temp);
}


@ -81,7 +81,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
Document newDoc = r1.document(10);
newDoc.removeField("id");
newDoc.add(new Field("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
writer.updateDocument(new Term("id", id10), newDoc);
assertFalse(r1.isCurrent());
@ -102,7 +102,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
assertTrue(r2.isCurrent());
assertTrue(r3.isCurrent());
@ -776,8 +776,8 @@ public class TestIndexWriterReader extends LuceneTestCase {
Directory dir = newDirectory();
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(id);
id.setValue("0");
w.addDocument(doc);
@ -800,8 +800,8 @@ public class TestIndexWriterReader extends LuceneTestCase {
Directory dir = newDirectory();
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(id);
id.setValue("0");
w.addDocument(doc);
@ -850,7 +850,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
});
Document doc = new Document();
doc.add(new Field("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
for(int i=0;i<20;i++) {
w.addDocument(doc);
}


@ -45,7 +45,7 @@ public class TestIsCurrent extends LuceneTestCaseJ4 {
// write document
Document doc = new Document();
doc.add(new Field("UUID", "1", Store.YES, Index.ANALYZED));
doc.add(newField("UUID", "1", Store.YES, Index.ANALYZED));
writer.addDocument(doc);
writer.commit();
}


@ -78,7 +78,7 @@ public class TestLazyBug extends LuceneTestCase {
for (int d = 1; d <= NUM_DOCS; d++) {
Document doc = new Document();
for (int f = 1; f <= NUM_FIELDS; f++ ) {
doc.add(new Field("f"+f,
doc.add(newField("f"+f,
data[f % data.length]
+ '#' + data[random.nextInt(data.length)],
Field.Store.YES,


@ -85,7 +85,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
content = this.term3 + " " + this.term2;
}
doc.add(new Field(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@ -129,7 +129,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 10; i++) {
Document doc = new Document();
doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@ -39,9 +39,9 @@ public class TestMultiFields extends LuceneTestCase {
int numDocs = _TestUtil.nextInt(random, 1, 100 * RANDOM_MULTIPLIER);
Document doc = new Document();
Field f = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
Field f = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(f);
Field id = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
doc.add(id);
boolean onlyUniqueTerms = random.nextBoolean();
@ -132,7 +132,7 @@ public class TestMultiFields extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d = new Document();
d.add(new Field("f", "j", Field.Store.NO, Field.Index.NOT_ANALYZED));
d.add(newField("f", "j", Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(d);
w.commit();
w.addDocument(d);


@ -66,7 +66,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
Term term = new Term("test", "a");
for (int i = 0; i < 5000; i++) {
Document d1 = new Document();
d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
d1.add(newField(term.field(), term.text(), Store.NO, Index.ANALYZED));
writer.addDocument(d1);
}
writer.commit();


@ -79,7 +79,7 @@ public class TestNoDeletionPolicy extends LuceneTestCaseJ4 {
.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
for (int i = 0; i < 10; i++) {
Document doc = new Document();
doc.add(new Field("c", "a" + i, Store.YES, Index.ANALYZED));
doc.add(newField("c", "a" + i, Store.YES, Index.ANALYZED));
writer.addDocument(doc);
writer.commit();
assertEquals("wrong number of commits !", i + 1, IndexReader.listCommits(dir).size());


@ -208,7 +208,7 @@ public class TestNorms extends LuceneTestCase {
Document d = new Document();
float boost = nextNorm();
for (int i = 0; i < 10; i++) {
Field f = new Field("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
Field f = newField("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
f.setBoost(boost);
d.add(f);
}


@ -64,11 +64,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
f2.setOmitTermFreqAndPositions(true);
d.add(f2);
@ -113,11 +113,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
f2.setOmitTermFreqAndPositions(true);
d.add(f2);
@ -166,11 +166,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f2);
for(int i=0;i<5;i++)
@ -216,7 +216,7 @@ public class TestOmitTf extends LuceneTestCase {
lmp.setUseCompoundDocStore(false);
Document d = new Document();
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
f1.setOmitTermFreqAndPositions(true);
d.add(f1);
@ -252,11 +252,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
sb.append(term).append(" ");
String content = sb.toString();
Field noTf = new Field("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
Field noTf = newField("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
noTf.setOmitTermFreqAndPositions(true);
d.add(noTf);
Field tf = new Field("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.ANALYZED);
Field tf = newField("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.ANALYZED);
d.add(tf);
writer.addDocument(d);


@ -123,7 +123,7 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir2 = newDirectory();
IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d3);
w2.close();
@ -177,14 +177,14 @@ public class TestParallelReader extends LuceneTestCase {
IndexWriter modifier = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
((LogMergePolicy) modifier.getMergePolicy()).setMergeFactor(10);
Document d = new Document();
d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
modifier.close();
modifier = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
((LogMergePolicy) modifier.getMergePolicy()).setMergeFactor(10);
d = new Document();
d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
modifier.close();
@ -241,16 +241,16 @@ public class TestParallelReader extends LuceneTestCase {
dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(d2);
w.close();
@ -271,12 +271,12 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = newDirectory();
IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(newField("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
w1.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(newField("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
w1.addDocument(d2);
w1.close();
return dir1;
@ -286,12 +286,12 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir2 = newDirectory();
IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(newField("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(newField("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d3);
Document d4 = new Document();
d4.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d4.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
d4.add(newField("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d4.add(newField("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d4);
w2.close();
return dir2;

View File

@ -79,10 +79,10 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
{
IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("test", "", Store.NO, Index.ANALYZED,
doc.add(newField("test", "", Store.NO, Index.ANALYZED,
TermVector.YES));
iw.addDocument(doc);
doc.add(new Field("test", "", Store.NO, Index.ANALYZED,
doc.add(newField("test", "", Store.NO, Index.ANALYZED,
TermVector.NO));
iw.addDocument(doc);
iw.close();

View File

@ -42,11 +42,11 @@ public class TestParallelTermEnum extends LuceneTestCase {
IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
doc = new Document();
doc.add(new Field("field1", "the quick brown fox jumps", Store.YES,
doc.add(newField("field1", "the quick brown fox jumps", Store.YES,
Index.ANALYZED));
doc.add(new Field("field2", "the quick brown fox jumps", Store.YES,
doc.add(newField("field2", "the quick brown fox jumps", Store.YES,
Index.ANALYZED));
doc.add(new Field("field4", "", Store.NO, Index.ANALYZED));
doc.add(newField("field4", "", Store.NO, Index.ANALYZED));
iw1.addDocument(doc);
iw1.close();
@ -54,10 +54,10 @@ public class TestParallelTermEnum extends LuceneTestCase {
IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
doc = new Document();
doc.add(new Field("field0", "", Store.NO, Index.ANALYZED));
doc.add(new Field("field1", "the fox jumps over the lazy dog",
doc.add(newField("field0", "", Store.NO, Index.ANALYZED));
doc.add(newField("field1", "the fox jumps over the lazy dog",
Store.YES, Index.ANALYZED));
doc.add(new Field("field3", "the fox jumps over the lazy dog",
doc.add(newField("field3", "the fox jumps over the lazy dog",
Store.YES, Index.ANALYZED));
iw2.addDocument(doc);

View File

@ -140,8 +140,8 @@ public class TestPayloadProcessorProvider extends LuceneTestCaseJ4 {
TokenStream payloadTS2 = new PayloadTokenStream("p2");
for (int i = 0; i < NUM_DOCS; i++) {
Document doc = new Document();
doc.add(new Field("id", "doc" + i, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
doc.add(new Field("content", "doc content " + i, Store.NO, Index.ANALYZED));
doc.add(newField("id", "doc" + i, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
doc.add(newField("content", "doc content " + i, Store.NO, Index.ANALYZED));
doc.add(new Field("p", payloadTS1));
doc.add(new Field("p", payloadTS2));
writer.addDocument(doc);
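One detail worth noting in the hunk above: only the String-valued fields (id, content) switch to the newField helper, while the TokenStream-valued payload fields ("p") keep the direct Field constructor. A small sketch of that mix follows; the surrounding class is illustrative, and a plain TokenStream parameter stands in for the test's own PayloadTokenStream.

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;

public class MixedFieldSketchTest extends LuceneTestCase {
  // payloadTS stands in for the PayloadTokenStream instances built by the real test
  Document buildDoc(int i, TokenStream payloadTS) {
    Document doc = new Document();
    doc.add(newField("id", "doc" + i, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
    doc.add(newField("content", "doc content " + i, Field.Store.NO, Field.Index.ANALYZED));
    doc.add(new Field("p", payloadTS)); // TokenStream-valued field: no helper overload is used in this diff
    return doc;
  }
}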

View File

@ -103,15 +103,15 @@ public class TestPayloads extends LuceneTestCase {
IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
Document d = new Document();
// this field won't have any payloads
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
// this field will have payloads in all docs, however not for all term positions,
// so this field is used to check if the DocumentWriter correctly enables the payloads bit
// even if only some term positions have payloads
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
// this field is used to verify if the SegmentMerger enables payloads for a field if it has payloads
// enabled in only some documents
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
// only add payload data for field f2
analyzer.setPayloadData("f2", 1, "somedata".getBytes(), 0, 1);
writer.addDocument(d);
@ -130,10 +130,10 @@ public class TestPayloads extends LuceneTestCase {
writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT,
analyzer).setOpenMode(OpenMode.CREATE));
d = new Document();
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
// add payload data for field f2 and f3
analyzer.setPayloadData("f2", "somedata".getBytes(), 0, 1);
analyzer.setPayloadData("f3", "somedata".getBytes(), 0, 3);
@ -196,7 +196,7 @@ public class TestPayloads extends LuceneTestCase {
byte[] payloadData = generateRandomData(payloadDataLength);
Document d = new Document();
d.add(new Field(fieldName, content, Field.Store.NO, Field.Index.ANALYZED));
d.add(newField(fieldName, content, Field.Store.NO, Field.Index.ANALYZED));
// add the same document multiple times to have the same payload lengths for all
// occurrences within two consecutive skip intervals
int offset = 0;
@ -317,7 +317,7 @@ public class TestPayloads extends LuceneTestCase {
String singleTerm = "lucene";
d = new Document();
d.add(new Field(fieldName, singleTerm, Field.Store.NO, Field.Index.ANALYZED));
d.add(newField(fieldName, singleTerm, Field.Store.NO, Field.Index.ANALYZED));
// add a payload whose length is greater than the buffer size of BufferedIndexOutput
payloadData = generateRandomData(2000);
analyzer.setPayloadData(fieldName, payloadData, 100, 1500);

View File

@ -33,7 +33,7 @@ public class TestRollback extends LuceneTestCase {
RandomIndexWriter rw = new RandomIndexWriter(random, dir);
for (int i = 0; i < 5; i++) {
Document doc = new Document();
doc.add(new Field("pk", Integer.toString(i), Store.YES, Index.ANALYZED_NO_NORMS));
doc.add(newField("pk", Integer.toString(i), Store.YES, Index.ANALYZED_NO_NORMS));
rw.addDocument(doc);
}
rw.close();
@ -45,8 +45,8 @@ public class TestRollback extends LuceneTestCase {
for (int i = 0; i < 3; i++) {
Document doc = new Document();
String value = Integer.toString(i);
doc.add(new Field("pk", value, Store.YES, Index.ANALYZED_NO_NORMS));
doc.add(new Field("text", "foo", Store.YES, Index.ANALYZED_NO_NORMS));
doc.add(newField("pk", value, Store.YES, Index.ANALYZED_NO_NORMS));
doc.add(newField("text", "foo", Store.YES, Index.ANALYZED_NO_NORMS));
w.updateDocument(pkTerm.createTerm(value), doc);
}
w.rollback();

View File

@ -264,7 +264,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}

View File

@ -127,7 +127,7 @@ public class TestSegmentTermEnum extends LuceneTestCase {
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}

View File

@ -111,7 +111,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCaseJ4 {
@Override
public void run() {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
do {
for(int i=0;i<27;i++) {
try {
@ -152,7 +152,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCaseJ4 {
// final segment, so deletion policy has a chance to
// delete again:
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Make sure we don't have any leftover files in the

View File

@ -80,8 +80,8 @@ public class TestStressIndexing extends MultiCodecTestCase {
for(int j=0; j<10; j++) {
Document d = new Document();
int n = random.nextInt();
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}

View File

@ -530,20 +530,26 @@ public class TestStressIndexing2 extends MultiCodecTestCase {
for(int j=0;j<numTerms;j++) {
int[] pos1 = tpv1.getTermPositions(j);
int[] pos2 = tpv2.getTermPositions(j);
assertEquals(pos1.length, pos2.length);
TermVectorOffsetInfo[] offsets1 = tpv1.getOffsets(j);
TermVectorOffsetInfo[] offsets2 = tpv2.getOffsets(j);
if (offsets1 == null)
assertTrue(offsets2 == null);
else
assertTrue(offsets2 != null);
for(int k=0;k<pos1.length;k++) {
assertEquals(pos1[k], pos2[k]);
if (offsets1 != null) {
assertEquals(offsets1[k].getStartOffset(),
offsets2[k].getStartOffset());
assertEquals(offsets1[k].getEndOffset(),
offsets2[k].getEndOffset());
if (pos1 == null) {
assertNull(pos2);
} else {
assertNotNull(pos1);
assertNotNull(pos2);
assertEquals(pos1.length, pos2.length);
TermVectorOffsetInfo[] offsets1 = tpv1.getOffsets(j);
TermVectorOffsetInfo[] offsets2 = tpv2.getOffsets(j);
if (offsets1 == null)
assertTrue(offsets2 == null);
else
assertTrue(offsets2 != null);
for(int k=0;k<pos1.length;k++) {
assertEquals(pos1[k], pos2[k]);
if (offsets1 != null) {
assertEquals(offsets1[k].getStartOffset(),
offsets2[k].getStartOffset());
assertEquals(offsets1[k].getEndOffset(),
offsets2[k].getEndOffset());
}
}
}
}
@ -551,7 +557,7 @@ public class TestStressIndexing2 extends MultiCodecTestCase {
}
}
private static class IndexingThread extends Thread {
private class IndexingThread extends Thread {
IndexWriter w;
int base;
int range;
@ -639,7 +645,7 @@ public class TestStressIndexing2 extends MultiCodecTestCase {
ArrayList<Field> fields = new ArrayList<Field>();
String idString = getIdString();
Field idField = new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
Field idField = newField(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
fields.add(idField);
int nFields = nextInt(maxFields);
@ -663,16 +669,16 @@ public class TestStressIndexing2 extends MultiCodecTestCase {
switch (nextInt(4)) {
case 0:
fields.add(new Field("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
fields.add(newField("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
break;
case 1:
fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
fields.add(newField("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
break;
case 2:
fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
fields.add(newField("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
break;
case 3:
fields.add(new Field("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
fields.add(newField("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
break;
}
}
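Besides the newField substitutions, the TestStressIndexing2 hunk above also makes the term-vector comparison null-safe: getTermPositions(j) may return null when a vector stores offsets but no positions, and the old code read pos1.length unconditionally. The same logic, restated as a standalone helper for readability (the class and method names are invented; the TermPositionVector and TermVectorOffsetInfo calls are the ones used in the hunk):

import static org.junit.Assert.*;

import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermVectorOffsetInfo;

class TermVectorChecks {
  static void assertSamePositionsAndOffsets(TermPositionVector tpv1, TermPositionVector tpv2, int j) {
    int[] pos1 = tpv1.getTermPositions(j);
    int[] pos2 = tpv2.getTermPositions(j);
    if (pos1 == null) {
      assertNull(pos2); // neither vector indexed positions for this term
      return;
    }
    assertNotNull(pos2);
    assertEquals(pos1.length, pos2.length);
    TermVectorOffsetInfo[] offsets1 = tpv1.getOffsets(j);
    TermVectorOffsetInfo[] offsets2 = tpv2.getOffsets(j);
    assertEquals(offsets1 == null, offsets2 == null);
    for (int k = 0; k < pos1.length; k++) {
      assertEquals(pos1[k], pos2[k]);
      if (offsets1 != null) {
        assertEquals(offsets1[k].getStartOffset(), offsets2[k].getStartOffset());
        assertEquals(offsets1[k].getEndOffset(), offsets2[k].getEndOffset());
      }
    }
  }
}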

View File

@ -69,7 +69,7 @@ public class TestTermdocPerf extends LuceneTestCase {
};
Document doc = new Document();
doc.add(new Field(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
doc.add(newField(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, analyzer)
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));

View File

@ -66,8 +66,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
for(int i=0;i<200;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
@ -87,8 +87,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
writerFinal.optimize(false);
for(int k=0;k<17*(1+iFinal);k++) {
Document d = new Document();
d.add(new Field("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
writerFinal.addDocument(d);
}
for(int k=0;k<9*(1+iFinal);k++)

View File

@ -130,7 +130,7 @@ public class TestTransactionRollback extends LuceneTestCase {
IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(sdp));
for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
Document doc=new Document();
doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
doc.add(newField(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
w.addDocument(doc);
if (currentRecordId%10 == 0) {

View File

@ -140,8 +140,8 @@ public class TestTransactions extends LuceneTestCase {
for(int j=0; j<10; j++) {
Document d = new Document();
int n = random.nextInt();
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
@ -185,7 +185,7 @@ public class TestTransactions extends LuceneTestCase {
for(int j=0; j<7; j++) {
Document d = new Document();
int n = random.nextInt();
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -297,7 +297,7 @@ public class TestSurrogates extends LuceneTestCaseJ4 {
uniqueTerms.add(term);
fieldTerms.add(new Term(field, term));
Document doc = new Document();
doc.add(new Field(field, term, Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(newField(field, term, Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
uniqueTermCount += uniqueTerms.size();

View File

@ -284,7 +284,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();

View File

@ -571,7 +571,7 @@ public class TestQueryParser extends LocalizedTestCase {
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
Document doc = new Document();
doc.add(new Field("content","\u0633\u0627\u0628",
doc.add(newField("content","\u0633\u0627\u0628",
Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(doc);
iw.close();
@ -1131,13 +1131,13 @@ public class TestQueryParser extends LocalizedTestCase {
assertEquals(expected, hits.length);
}
private static void addDateDoc(String content, int year, int month,
private void addDateDoc(String content, int year, int month,
int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance(Locale.ENGLISH);
cal.set(year, month-1, day, hour, minute, second);
d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}
@ -1155,7 +1155,7 @@ public class TestQueryParser extends LocalizedTestCase {
Analyzer a = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
Document doc = new Document();
doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
w.close();

View File

@ -111,7 +111,7 @@ public class BaseTestRangeFilter extends LuceneTestCase {
for (int d = minId; d <= maxId; d++) {
Document doc = new Document();
doc.add(new Field("id", pad(d), Field.Store.YES,
doc.add(newField("id", pad(d), Field.Store.YES,
Field.Index.NOT_ANALYZED));
int r = index.allowNegativeRandomInts ? random.nextInt() : random
.nextInt(Integer.MAX_VALUE);
@ -121,9 +121,9 @@ public class BaseTestRangeFilter extends LuceneTestCase {
if (r < index.minR) {
index.minR = r;
}
doc.add(new Field("rand", pad(r), Field.Store.YES,
doc.add(newField("rand", pad(r), Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "body", Field.Store.YES,
doc.add(newField("body", "body", Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}

View File

@ -43,11 +43,11 @@ public class TestAutomatonQuery extends LuceneTestCase {
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory);
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field titleField = newField("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
Field field = new Field(FN, "this is document one 2345", Field.Store.NO,
Field field = newField(FN, "this is document one 2345", Field.Store.NO,
Field.Index.ANALYZED);
Field footerField = new Field("footer", "a footer", Field.Store.NO,
Field footerField = newField("footer", "a footer", Field.Store.NO,
Field.Index.ANALYZED);
doc.add(titleField);
doc.add(field);

View File

@ -46,11 +46,11 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
directory = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, directory);
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field titleField = newField("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
Field field = new Field(FN, "", Field.Store.NO,
Field field = newField(FN, "", Field.Store.NO,
Field.Index.ANALYZED);
Field footerField = new Field("footer", "a footer", Field.Store.NO,
Field footerField = newField("footer", "a footer", Field.Store.NO,
Field.Index.ANALYZED);
doc.add(titleField);
doc.add(field);

View File

@ -54,7 +54,7 @@ public class TestBoolean2 extends LuceneTestCase {
RandomIndexWriter writer= new RandomIndexWriter(random, directory);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
@ -77,12 +77,12 @@ public class TestBoolean2 extends LuceneTestCase {
RandomIndexWriter w = new RandomIndexWriter(random, dir2);
Document doc = new Document();
doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
w.addDocument(doc);
}
doc = new Document();
doc.add(new Field("field2", "big bad bug", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field2", "big bad bug", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
w.addDocument(doc);
}

View File

@ -56,10 +56,10 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
doc.add(newField("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(newField("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
doc.add(newField("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
}
w.addDocument(doc);
}

View File

@ -143,12 +143,12 @@ public class TestBooleanOr extends LuceneTestCase {
//
Document d = new Document();
d.add(new Field(
d.add(newField(
FIELD_T,
"Optimize not deleting all files",
Field.Store.YES,
Field.Index.ANALYZED));
d.add(new Field(
d.add(newField(
FIELD_C,
"Deleted When I run an optimize in our production environment.",
Field.Store.YES,

View File

@ -78,7 +78,7 @@ public class TestBooleanPrefixQuery extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random, directory);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
reader = writer.getReader();

View File

@ -61,7 +61,7 @@ public class TestBooleanQuery extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random, dir);
Document doc = new Document();
doc.add(new Field("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
doc.add(newField("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();

View File

@ -46,7 +46,7 @@ public class TestBooleanScorer extends LuceneTestCase
RandomIndexWriter writer = new RandomIndexWriter(random, directory);
for (int i = 0; i < values.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
IndexReader ir = writer.getReader();

View File

@ -42,7 +42,7 @@ public class TestCachingSpanFilter extends LuceneTestCase {
// add a doc, refresh the reader, and check that its there
Document doc = new Document();
doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
reader = refreshReader(reader);

View File

@ -164,7 +164,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
// add a doc, refresh the reader, and check that its there
Document doc = new Document();
doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(newField("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
reader = refreshReader(reader);

View File

@ -56,15 +56,15 @@ public class TestCustomSearcherSort extends LuceneTestCase implements Serializab
Document doc = new Document();
if ((i % 5) != 0) { // some documents must not have an entry in the first
// sort field
doc.add(new Field("publicationDate_", random.getLuceneDate(),
doc.add(newField("publicationDate_", random.getLuceneDate(),
Field.Store.YES, Field.Index.NOT_ANALYZED));
}
if ((i % 7) == 0) { // some documents to match the query (see below)
doc.add(new Field("content", "test", Field.Store.YES,
doc.add(newField("content", "test", Field.Store.YES,
Field.Index.ANALYZED));
}
// every document has a defined 'mandant' field
doc.add(new Field("mandant", Integer.toString(i % 3), Field.Store.YES,
doc.add(newField("mandant", Integer.toString(i % 3), Field.Store.YES,
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}

View File

@ -51,10 +51,10 @@ public class TestDateFilter extends LuceneTestCase {
Document doc = new Document();
// add time that is in the past
doc.add(new Field("datefield", DateTools.timeToString(now - 1000,
doc.add(newField("datefield", DateTools.timeToString(now - 1000,
DateTools.Resolution.MILLISECOND), Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "Today is a very sunny day in New York City",
doc.add(newField("body", "Today is a very sunny day in New York City",
Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
@ -117,10 +117,10 @@ public class TestDateFilter extends LuceneTestCase {
Document doc = new Document();
// add time that is in the future
doc.add(new Field("datefield", DateTools.timeToString(now + 888888,
doc.add(newField("datefield", DateTools.timeToString(now + 888888,
DateTools.Resolution.MILLISECOND), Field.Store.YES,
Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "Today is a very sunny day in New York City",
doc.add(newField("body", "Today is a very sunny day in New York City",
Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);

View File

@ -106,16 +106,16 @@ public class TestDateSort extends LuceneTestCase {
assertEquals(Arrays.asList(expectedOrder), Arrays.asList(actualOrder));
}
private static Document createDocument(String text, long time) {
private Document createDocument(String text, long time) {
Document document = new Document();
// Add the text field.
Field textField = new Field(TEXT_FIELD, text, Field.Store.YES, Field.Index.ANALYZED);
Field textField = newField(TEXT_FIELD, text, Field.Store.YES, Field.Index.ANALYZED);
document.add(textField);
// Add the date/time field.
String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
Field dateTimeField = new Field(DATE_TIME_FIELD, dateTimeString, Field.Store.YES,
Field dateTimeField = newField(DATE_TIME_FIELD, dateTimeString, Field.Store.YES,
Field.Index.NOT_ANALYZED);
document.add(dateTimeField);

View File

@ -88,13 +88,13 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
// d1 is an "ok" match for: albino elephant
{
Document d1 = new Document();
d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
d1.add(newField("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
// "d1"));
d1
.add(new Field("hed", "elephant", Field.Store.YES,
.add(newField("hed", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
d1
.add(new Field("dek", "elephant", Field.Store.YES,
.add(newField("dek", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
writer.addDocument(d1);
}
@ -102,15 +102,15 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
// d2 is a "good" match for: albino elephant
{
Document d2 = new Document();
d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
d2.add(newField("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
// "d2"));
d2
.add(new Field("hed", "elephant", Field.Store.YES,
.add(newField("hed", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
d2.add(newField("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
// "albino"));
d2
.add(new Field("dek", "elephant", Field.Store.YES,
.add(newField("dek", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
writer.addDocument(d2);
}
@ -118,12 +118,12 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
// d3 is a "better" match for: albino elephant
{
Document d3 = new Document();
d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
d3.add(newField("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
// "d3"));
d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
d3.add(newField("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
// "albino"));
d3
.add(new Field("hed", "elephant", Field.Store.YES,
.add(newField("hed", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
writer.addDocument(d3);
}
@ -131,14 +131,14 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
// d4 is the "best" match for: albino elephant
{
Document d4 = new Document();
d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
d4.add(newField("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
// "d4"));
d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
d4.add(newField("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
// "albino"));
d4
.add(new Field("hed", "elephant", Field.Store.YES,
.add(newField("hed", "elephant", Field.Store.YES,
Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
d4.add(newField("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
// "albino"));
writer.addDocument(d4);
}

View File

@ -40,8 +40,8 @@ public class TestDocBoost extends LuceneTestCase {
Directory store = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, store);
Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f1 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = newField("field", "word", Field.Store.YES, Field.Index.ANALYZED);
f2.setBoost(2.0f);
Document d1 = new Document();

View File

@ -103,7 +103,7 @@ public class TestDocIdSet extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, dir);
Document doc = new Document();
doc.add(new Field("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
doc.add(newField("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();

View File

@ -119,7 +119,7 @@ public class TestElevationComparator extends LuceneTestCase {
private Document adoc(String[] vals) {
Document doc = new Document();
for (int i = 0; i < vals.length - 2; i += 2) {
doc.add(new Field(vals[i], vals[i + 1], Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField(vals[i], vals[i + 1], Field.Store.YES, Field.Index.ANALYZED));
}
return doc;
}

Some files were not shown because too many files have changed in this diff.