remove the last calls to the deprecated Field API

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@150497 13f79535-47bb-0310-9956-ffa450edef68
Daniel Naber 2004-09-07 18:26:36 +00:00
parent 3305cee1b6
commit 89d41cd8f1
11 changed files with 21 additions and 19 deletions
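
Every hunk in this commit is the same mechanical rewrite: a deprecated Field factory method (Field.Text, Field.Keyword, Field.UnStored) becomes a call to the Field constructor with explicit Field.Store and Field.Index arguments, plus Field.TermVector where term vectors are wanted. A minimal sketch of the mapping, with placeholder field name "f" and value "v"; the equivalences are exactly those applied in the hunks below (Lucene 1.4-era API):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

public class FieldMigrationSketch {
  static Document example() {
    Document doc = new Document();
    // Field.Text(name, value): stored, tokenized
    doc.add(new Field("f", "v", Field.Store.YES, Field.Index.TOKENIZED));
    // Field.Keyword(name, value): stored, not tokenized
    doc.add(new Field("f", "v", Field.Store.YES, Field.Index.UN_TOKENIZED));
    // Field.UnStored(name, value): not stored, tokenized
    doc.add(new Field("f", "v", Field.Store.NO, Field.Index.TOKENIZED));
    // Field.Text(name, value, true): stored, tokenized, with term vectors
    doc.add(new Field("f", "v", Field.Store.YES, Field.Index.TOKENIZED,
                      Field.TermVector.YES));
    return doc;
  }
}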

@@ -40,7 +40,7 @@ public class TestNot extends TestCase {
     IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
     Document d1 = new Document();
-    d1.add(Field.Text("field", "a b"));
+    d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(d1);
     writer.optimize();

@@ -58,11 +58,11 @@ public class TestPhrasePrefixQuery
     Document doc3 = new Document();
     Document doc4 = new Document();
     Document doc5 = new Document();
-    doc1.add(Field.Text("body", "blueberry pie"));
-    doc2.add(Field.Text("body", "blueberry strudel"));
-    doc3.add(Field.Text("body", "blueberry pizza"));
-    doc4.add(Field.Text("body", "blueberry chewing gum"));
-    doc5.add(Field.Text("body", "piccadilly circus"));
+    doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.TOKENIZED));
+    doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.TOKENIZED));
+    doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.TOKENIZED));
+    doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.TOKENIZED));
+    doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(doc1);
     writer.addDocument(doc2);
     writer.addDocument(doc3);

@@ -41,7 +41,7 @@ public class TestPhraseQuery extends TestCase {
     IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
     Document doc = new Document();
-    doc.add(Field.Text("field", "one two three four five"));
+    doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(doc);
     writer.optimize();
@@ -155,7 +155,7 @@ public class TestPhraseQuery extends TestCase {
     StopAnalyzer stopAnalyzer = new StopAnalyzer();
     IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true);
     Document doc = new Document();
-    doc.add(Field.Text("field", "the stop words are here"));
+    doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(doc);
     writer.close();

@@ -65,7 +65,7 @@ public class TestPositionIncrement extends TestCase {
     RAMDirectory store = new RAMDirectory();
     IndexWriter writer = new IndexWriter(store, analyzer, true);
     Document d = new Document();
-    d.add(Field.Text("field", "bogus"));
+    d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(d);
     writer.optimize();
     writer.close();

@@ -39,7 +39,7 @@ public class TestPrefixQuery extends TestCase {
     IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
     for (int i = 0; i < categories.length; i++) {
       Document doc = new Document();
-      doc.add(Field.Keyword("category", categories[i]));
+      doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
       writer.addDocument(doc);
     }
     writer.close();

@@ -86,8 +86,8 @@ public class TestRangeQuery extends TestCase {
   private void insertDoc(IndexWriter writer, String content) throws IOException {
     Document doc = new Document();
-    doc.add(Field.Keyword("id", "id" + docCount));
-    doc.add(Field.UnStored("content", content));
+    doc.add(new Field("id", "id" + docCount, Field.Store.YES, Field.Index.UN_TOKENIZED));
+    doc.add(new Field("content", content, Field.Store.NO, Field.Index.TOKENIZED));
     writer.addDocument(doc);
     docCount++;

@@ -54,7 +54,7 @@ public class TestRemoteSearchable extends TestCase {
     RAMDirectory indexStore = new RAMDirectory();
     IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(),true);
     Document doc = new Document();
-    doc.add(Field.Text("test", "test text"));
+    doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(doc);
     writer.optimize();
     writer.close();

@@ -42,7 +42,7 @@ public class TestSetNorm extends TestCase {
     IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
     // add the same document four times
-    Field f1 = Field.Text("field", "word");
+    Field f1 = new Field("field", "word", Field.Store.YES, Field.Index.TOKENIZED);
     Document d1 = new Document();
     d1.add(f1);
     writer.addDocument(d1);

@@ -54,10 +54,10 @@ public class TestSimilarity extends TestCase {
     writer.setSimilarity(new SimpleSimilarity());
     Document d1 = new Document();
-    d1.add(Field.Text("field", "a c"));
+    d1.add(new Field("field", "a c", Field.Store.YES, Field.Index.TOKENIZED));
     Document d2 = new Document();
-    d2.add(Field.Text("field", "a b c"));
+    d2.add(new Field("field", "a b c", Field.Store.YES, Field.Index.TOKENIZED));
     writer.addDocument(d1);
     writer.addDocument(d2);

@@ -43,7 +43,8 @@ public class TestTermVectors extends TestCase {
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
-      doc.add(Field.Text("field", English.intToEnglish(i), true));
+      doc.add(new Field("field", English.intToEnglish(i),
+          Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
       writer.addDocument(doc);
     }
     writer.close();
@@ -212,7 +213,8 @@ public class TestTermVectors extends TestCase {
   private void setupDoc(Document doc, String text)
   {
-    doc.add(Field.Text("field", text, true));
+    doc.add(new Field("field", text, Field.Store.YES,
+        Field.Index.TOKENIZED, Field.TermVector.YES));
     //System.out.println("Document: " + doc);
   }
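
The TestTermVectors hunks above replace the boolean third argument of Field.Text with the explicit Field.TermVector.YES enum value. For context, a hypothetical read-back sketch, assuming the 1.4-era IndexReader.getTermFreqVector API these tests exercise (the directory, document number 0, and field name "field" are placeholders):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.store.Directory;

public class TermVectorReadSketch {
  static void dumpVector(Directory dir) throws java.io.IOException {
    IndexReader reader = IndexReader.open(dir);
    // Returns null unless the field was indexed with term vectors enabled.
    TermFreqVector v = reader.getTermFreqVector(0, "field");
    if (v != null) {
      String[] terms = v.getTerms();
      int[] freqs = v.getTermFrequencies();
      for (int i = 0; i < terms.length; i++)
        System.out.println(terms[i] + " x " + freqs[i]);
    }
    reader.close();
  }
}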

@@ -115,7 +115,7 @@ public class TestWildcard
     IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
     for (int i = 0; i < contents.length; ++i) {
       Document doc = new Document();
-      doc.add(Field.Text(field, contents[i]));
+      doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.TOKENIZED));
       writer.addDocument(doc);
     }
     writer.optimize();