LUCENE-1366: rename Field.Index.* options

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@694004 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2008-09-10 21:38:52 +00:00
parent ccf96e6b6b
commit 4218996230
143 changed files with 609 additions and 577 deletions

View File

@ -149,6 +149,11 @@ API Changes
which is equivalent to
getDirectory().fileModified(getSegmentsFileName()). (Mike McCandless)
23. LUCENE-1366: Rename Field.Index options to be more accurate:
TOKENIZED becomes ANALYZED; UN_TOKENIZED becomes NOT_ANALYZED;
NO_NORMS becomes NOT_ANALYZED_NO_NORMS and a new ANALYZED_NO_NORMS
is added. (Mike McCandless)
Bug fixes
1. LUCENE-1134: Fixed BooleanQuery.rewrite to only optimize a single
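For code migrating across this change, the rename is name-only: the old constants remain as deprecated aliases of the new ones (see the Field.java diff further down), so existing call sites keep compiling. A minimal sketch of the new names in use; the field names and values here are illustrative, not taken from this commit:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

public class FieldIndexRenameExample {
    public static void main(String[] args) {
        Document doc = new Document();
        // formerly Field.Index.UN_TOKENIZED: indexed as a single term
        doc.add(new Field("id", "42", Field.Store.YES, Field.Index.NOT_ANALYZED));
        // formerly Field.Index.TOKENIZED: run through the Analyzer at index time
        doc.add(new Field("body", "some text to analyze", Field.Store.YES, Field.Index.ANALYZED));
        // formerly Field.Index.NO_NORMS: single term, norms disabled
        doc.add(new Field("tag", "sports", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
        // new in this change: analyzed text with norms disabled
        doc.add(new Field("title", "Some Title", Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
        System.out.println(doc);
    }
}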

View File

@ -51,8 +51,8 @@ public class QueryAutoStopWordAnalyzerTest extends TestCase {
Document doc = new Document();
String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
doc.add(new Field("variedField", variedFieldValue, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("repetitiveField", repetitiveFieldValue, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("variedField", variedFieldValue, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("repetitiveField", repetitiveFieldValue, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();

View File

@ -65,17 +65,17 @@ public class ShingleAnalyzerWrapperTest extends TestCase {
Document doc;
doc = new Document();
doc.add(new Field("content", "please divide this sentence into shingles",
Field.Store.YES,Field.Index.TOKENIZED));
Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "just another test sentence",
Field.Store.YES,Field.Index.TOKENIZED));
Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "a sentence which contains no test",
Field.Store.YES,Field.Index.TOKENIZED));
Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();

View File

@ -95,8 +95,8 @@ public class HtmlDocument {
org.apache.lucene.document.Document luceneDoc =
new org.apache.lucene.document.Document();
luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));
luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.ANALYZED));
return luceneDoc;
}
@ -119,8 +119,8 @@ public class HtmlDocument {
org.apache.lucene.document.Document luceneDoc =
new org.apache.lucene.document.Document();
luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));
luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.ANALYZED));
String contents = null;
BufferedReader br =

View File

@ -330,12 +330,12 @@ public class IndexTask extends Task {
} else {
// Add the path of the file as a field named "path". Use a Keyword field, so
// that the index stores the path, and so that the path is searchable
doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified". Use a
// Keyword field, so that it's searchable, but so that no attempt is made
// to tokenize the field into words.
doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
totalIndexed++;

View File

@ -79,8 +79,8 @@ public class TextDocument {
// make a new, empty document
Document doc = new Document();
doc.add(new Field("title", f.getName(), Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("contents", textDoc.getContents(), Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("title", f.getName(), Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("contents", textDoc.getContents(), Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("rawcontents", textDoc.getContents(), Field.Store.YES, Field.Index.NO));
// return the document

View File

@ -71,7 +71,7 @@ public abstract class BasicDocMaker implements DocMaker {
protected Config config;
protected Field.Store storeVal = Field.Store.NO;
protected Field.Index indexVal = Field.Index.TOKENIZED;
protected Field.Index indexVal = Field.Index.ANALYZED;
protected Field.TermVector termVecVal = Field.TermVector.NO;
private synchronized int incrNumDocsCreated() {
@ -196,7 +196,7 @@ public abstract class BasicDocMaker implements DocMaker {
boolean tokenized = config.get("doc.tokenized",true);
boolean termVec = config.get("doc.term.vector",false);
storeVal = (stored ? Field.Store.YES : Field.Store.NO);
indexVal = (tokenized ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED);
indexVal = (tokenized ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED);
boolean termVecPositions = config.get("doc.term.vector.positions",false);
boolean termVecOffsets = config.get("doc.term.vector.offsets",false);
if (termVecPositions && termVecOffsets)

View File

@ -63,19 +63,19 @@ public class LineDocMaker extends BasicDocMaker {
bodyField = new Field(BasicDocMaker.BODY_FIELD,
"",
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
titleField = new Field(BasicDocMaker.TITLE_FIELD,
"",
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
dateField = new Field(BasicDocMaker.DATE_FIELD,
"",
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
idField = new Field(BasicDocMaker.ID_FIELD, "", Field.Store.YES, Field.Index.NO_NORMS);
idField = new Field(BasicDocMaker.ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
doc = new Document();
doc.add(bodyField);
@ -111,17 +111,17 @@ public class LineDocMaker extends BasicDocMaker {
Field localTitleField = new Field(BasicDocMaker.TITLE_FIELD,
title,
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
Field localBodyField = new Field(BasicDocMaker.BODY_FIELD,
body,
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
Field localDateField = new Field(BasicDocMaker.BODY_FIELD,
date,
storeVal,
Field.Index.TOKENIZED,
Field.Index.ANALYZED,
termVecVal);
Document localDoc = new Document();
localDoc.add(localBodyField);

View File

@ -258,11 +258,11 @@ public class StandardBenchmarker extends AbstractBenchmarker implements Benchmarker
for (int i = 0; i < tags.length; i++)
{
doc.add(new Field("tag" + i, tags[i], stored == true ? Field.Store.YES : Field.Store.NO,
tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
}
doc.add(new Field("file", in.getCanonicalPath(), stored == true ? Field.Store.YES : Field.Store.NO,
tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
BufferedReader reader = new BufferedReader(new FileReader(in));
String line = null;
//First line is the date, 3rd is the title, rest is body
@ -279,17 +279,17 @@ public class StandardBenchmarker extends AbstractBenchmarker implements Benchmarker
Date date = format.parse(dateStr.trim());
doc.add(new Field("date", DateTools.dateToString(date, DateTools.Resolution.SECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("date", DateTools.dateToString(date, DateTools.Resolution.SECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
if (title != null)
{
doc.add(new Field("title", title, stored == true ? Field.Store.YES : Field.Store.NO,
tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
if (body.length() > 0)
{
doc.add(new Field("body", body.toString(), stored == true ? Field.Store.YES : Field.Store.NO,
tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
return doc;

View File

@ -1021,7 +1021,7 @@ public class HighlighterTest extends TestCase implements Formatter {
RAMDirectory ramDir1 = new RAMDirectory();
IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true);
Document d = new Document();
Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.TOKENIZED);
Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer1.addDocument(d);
writer1.optimize();
@ -1032,7 +1032,7 @@ public class HighlighterTest extends TestCase implements Formatter {
RAMDirectory ramDir2 = new RAMDirectory();
IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true);
d = new Document();
f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.TOKENIZED);
f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer2.addDocument(d);
writer2.optimize();
@ -1265,7 +1265,7 @@ public class HighlighterTest extends TestCase implements Formatter {
private Document doc( String f, String v ){
Document doc = new Document();
doc.add( new Field( f, v, Store.YES, Index.TOKENIZED ) );
doc.add( new Field( f, v, Store.YES, Index.ANALYZED ) );
return doc;
}
@ -1395,7 +1395,7 @@ public class HighlighterTest extends TestCase implements Formatter {
private void addDoc(IndexWriter writer, String text) throws IOException {
Document d = new Document();
Field f = new Field(FIELD_NAME, text, Field.Store.YES, Field.Index.TOKENIZED);
Field f = new Field(FIELD_NAME, text, Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer.addDocument(d);

View File

@ -148,18 +148,18 @@ public class TestIndicesEquals extends TestCase {
}
private void assembleDocument(Document document, int i) {
document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 0) {
document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.NO));
document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO));
document.add(new Field("b3", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
if (i > 1) {
document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 2) {
document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 3) {
Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
f.setOmitNorms(true);
document.add(f);
if (i > 4) {

View File

@ -226,7 +226,7 @@ public class MemoryIndex implements Serializable {
* Convenience method; Tokenizes the given field text and adds the resulting
* terms to the index; Equivalent to adding an indexed non-keyword Lucene
* {@link org.apache.lucene.document.Field} that is
* {@link org.apache.lucene.document.Field.Index#TOKENIZED tokenized},
* {@link org.apache.lucene.document.Field.Index#ANALYZED tokenized},
* {@link org.apache.lucene.document.Field.Store#NO not stored},
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions} (or
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions and offsets}),
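The javadoc above defines addField in terms of an unstored, analyzed field with positions. A minimal usage sketch against the contrib MemoryIndex API of this era; the field name, text, and query string are illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queryParser.QueryParser;

public class MemoryIndexSketch {
    public static void main(String[] args) throws Exception {
        StandardAnalyzer analyzer = new StandardAnalyzer();
        MemoryIndex index = new MemoryIndex();
        // Behaves like indexing an ANALYZED, unstored field with position information.
        index.addField("content", "readings about salmon and other fish", analyzer);
        float score = index.search(new QueryParser("content", analyzer).parse("salmon"));
        System.out.println(score > 0.0f); // true when the query matches
    }
}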

View File

@ -368,7 +368,7 @@ public class MemoryIndexTest extends TestCase {
private Document createDocument(String content) {
Document doc = new Document();
doc.add(new Field(FIELD_NAME, content, Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field(FIELD_NAME, content, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
return doc;
}

View File

@ -64,13 +64,13 @@ public class TestFieldNormModifier extends TestCase {
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
d.add(new Field("field", "word", Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("nonorm", "word", Field.Store.YES, Field.Index.NO_NORMS));
d.add(new Field("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
d.add(new Field("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED));
for (int j = 1; j <= i; j++) {
d.add(new Field("field", "crap", Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("nonorm", "more words", Field.Store.YES, Field.Index.NO_NORMS));
d.add(new Field("field", "crap", Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);
}

View File

@ -34,33 +34,33 @@ public class TestTermVectorAccessor extends TestCase {
Document doc;
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
iw.close();

View File

@ -58,9 +58,9 @@ public class ChainedFilterTest extends TestCase {
for (int i = 0; i < MAX; i++) {
Document doc = new Document();
doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("date", cal.getTime().toString(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
cal.add(Calendar.DATE, 1);

View File

@ -65,15 +65,15 @@ public class TestLengthNormModifier extends TestCase {
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
d.add(new Field("field", "word",
Field.Store.YES, Field.Index.TOKENIZED));
Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "word",
Field.Store.YES, Field.Index.NO_NORMS));
Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
for (int j = 1; j <= i; j++) {
d.add(new Field("field", "crap",
Field.Store.YES, Field.Index.TOKENIZED));
Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "more words",
Field.Store.YES, Field.Index.NO_NORMS));
Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);
}

View File

@ -60,10 +60,10 @@ public class BooleanFilterTest extends TestCase
private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
{
Document doc=new Document();
doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("price",price,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("inStock",inStock,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("price",price,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}

View File

@ -70,9 +70,9 @@ public class DuplicateFilterTest extends TestCase
private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
{
Document doc=new Document();
doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.UN_TOKENIZED));
doc.add(new Field("text",text,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
doc.add(new Field("text",text,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}

View File

@ -43,7 +43,7 @@ public class TermsFilterTest extends TestCase
{
Document doc=new Document();
int term=i*10; //terms are units of 10;
doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.UN_TOKENIZED));
doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
w.close();

View File

@ -39,7 +39,7 @@ public class TestRegexQuery extends TestCase {
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();

View File

@ -36,13 +36,13 @@ public class TestSpanRegexQuery extends TestCase {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
Document doc = new Document();
// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.TOKENIZED));
// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
// writer.addDocument(doc);
// doc = new Document();
doc.add(new Field("field", "auto update", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("field", "auto update", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "first auto update", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("field", "first auto update", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();

View File

@ -384,7 +384,7 @@ public class SpellChecker {
private static Document createDocument(String text, int ng1, int ng2) {
Document doc = new Document();
doc.add(new Field(F_WORD, text, Field.Store.YES, Field.Index.UN_TOKENIZED)); // orig term
doc.add(new Field(F_WORD, text, Field.Store.YES, Field.Index.NOT_ANALYZED)); // orig term
addGram(text, doc, ng1, ng2);
return doc;
}
@ -396,14 +396,14 @@ public class SpellChecker {
String end = null;
for (int i = 0; i < len - ng + 1; i++) {
String gram = text.substring(i, i + ng);
doc.add(new Field(key, gram, Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field(key, gram, Field.Store.NO, Field.Index.NOT_ANALYZED));
if (i == 0) {
doc.add(new Field("start" + ng, gram, Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("start" + ng, gram, Field.Store.NO, Field.Index.NOT_ANALYZED));
}
end = gram;
}
if (end != null) { // may not be present if len==ng1
doc.add(new Field("end" + ng, end, Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("end" + ng, end, Field.Store.NO, Field.Index.NOT_ANALYZED));
}
}
}
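Each gram, start and end value is already a single token, which is why these fields are indexed NOT_ANALYZED rather than run through an analyzer. A tiny sketch of the substring loop above for a hypothetical word (not taken from any dictionary):

public class GramSketch {
    public static void main(String[] args) {
        String text = "boat"; // hypothetical input word
        int ng = 3;
        for (int i = 0; i < text.length() - ng + 1; i++) {
            // prints "boa" then "oat"; "boa" would also land in start3, "oat" in end3
            System.out.println("gram" + ng + " = " + text.substring(i, i + ng));
        }
    }
}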

View File

@ -53,23 +53,23 @@ public class TestLuceneDictionary extends TestCase {
Document doc;
doc = new Document();
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("contents", "Tom", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("contents", "Tom", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("zzz", "bar", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();

View File

@ -50,8 +50,8 @@ public class TestSpellChecker extends TestCase {
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(new Field("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.TOKENIZED)); // + word thousand
doc.add(new Field("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
writer.addDocument(doc);
}
writer.close();

View File

@ -40,7 +40,7 @@ public class SingleFieldTestDb {
IndexWriter.MaxFieldLength.LIMITED);
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -122,9 +122,9 @@ public class ListSearcher extends AbstractListModel {
//this will allow us to retrive the results later
//and map this list model's row to a row in the decorated
//list model
document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.TOKENIZED));
document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.ANALYZED));
//add the string representation of the row to the index
document.add(new Field(FIELD_NAME, String.valueOf(listModel.getElementAt(row)).toLowerCase(), Field.Store.YES, Field.Index.TOKENIZED));
document.add(new Field(FIELD_NAME, String.valueOf(listModel.getElementAt(row)).toLowerCase(), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(document);
}
writer.optimize();

View File

@ -173,14 +173,14 @@ public class TableSearcher extends AbstractTableModel {
//this will allow us to retrive the results later
//and map this table model's row to a row in the decorated
//table model
document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.TOKENIZED));
document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.ANALYZED));
//iterate through all columns
//index the value keyed by the column name
//NOTE: there could be a problem with using column names with spaces
for (int column=0; column < tableModel.getColumnCount(); column++){
String columnName = tableModel.getColumnName(column);
String columnValue = String.valueOf(tableModel.getValueAt(row, column)).toLowerCase();
document.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.TOKENIZED));
document.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(document);
}

View File

@ -256,7 +256,7 @@ public class Syns2Index
int n = index(word2Nums, num2Words, g, doc);
if (n > 0)
{
doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
if ((++row % mod) == 0)
{
o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc);

View File

@ -70,8 +70,8 @@ public class TestParser extends TestCase {
String date=line.substring(0,endOfDate).trim();
String content=line.substring(endOfDate).trim();
org.apache.lucene.document.Document doc =new org.apache.lucene.document.Document();
doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("contents",content,Field.Store.YES,Field.Index.TOKENIZED));
doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
doc.add(new Field("contents",content,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
line=d.readLine();
}

View File

@ -128,7 +128,7 @@ public class TestQueryTemplateManager extends TestCase {
if(st.hasMoreTokens())
{
String value=st.nextToken().trim();
result.add(new Field(name,value,Field.Store.YES,Field.Index.TOKENIZED));
result.add(new Field(name,value,Field.Store.YES,Field.Index.ANALYZED));
}
}
return result;

View File

@ -47,14 +47,14 @@ public class FileDocument {
// Add the path of the file as a field named "path". Use a field that is
// indexed (i.e. searchable), but don't tokenize the field into words.
doc.add(new Field("path", f.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("path", f.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified". Use
// a field that is indexed (i.e. searchable), but don't tokenize the field
// into words.
doc.add(new Field("modified",
DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE),
Field.Store.YES, Field.Index.UN_TOKENIZED));
Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the contents of the file to a field named "contents". Specify a Reader,
// so that the text of the file is tokenized and indexed, but not stored.

View File

@ -49,19 +49,19 @@ public class HTMLDocument {
// Add the url as a field named "path". Use a field that is
// indexed (i.e. searchable), but don't tokenize the field into words.
doc.add(new Field("path", f.getPath().replace(dirSep, '/'), Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified".
// Use a field that is indexed (i.e. searchable), but don't tokenize
// the field into words.
doc.add(new Field("modified",
DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE),
Field.Store.YES, Field.Index.UN_TOKENIZED));
Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the uid as a field, so that index can be incrementally maintained.
// This field is not stored with document, it is indexed, but it is not
// tokenized prior to indexing.
doc.add(new Field("uid", uid(f), Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("uid", uid(f), Field.Store.NO, Field.Index.NOT_ANALYZED));
FileInputStream fis = new FileInputStream(f);
HTMLParser parser = new HTMLParser(fis);
@ -75,7 +75,7 @@ public class HTMLDocument {
doc.add(new Field("summary", parser.getSummary(), Field.Store.YES, Field.Index.NO));
// Add the title as a field that it can be searched and that is stored.
doc.add(new Field("title", parser.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("title", parser.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
// return the document
return doc;

View File

@ -68,16 +68,20 @@ public abstract class AbstractField implements Fieldable {
if (index == Field.Index.NO) {
this.isIndexed = false;
this.isTokenized = false;
} else if (index == Field.Index.TOKENIZED) {
} else if (index == Field.Index.ANALYZED) {
this.isIndexed = true;
this.isTokenized = true;
} else if (index == Field.Index.UN_TOKENIZED) {
} else if (index == Field.Index.NOT_ANALYZED) {
this.isIndexed = true;
this.isTokenized = false;
} else if (index == Field.Index.NO_NORMS) {
} else if (index == Field.Index.NOT_ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = false;
this.omitNorms = true;
} else if (index == Field.Index.ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = true;
this.omitNorms = true;
} else {
throw new IllegalArgumentException("unknown index parameter " + index);
}

View File

@ -69,30 +69,49 @@ public final class Field extends AbstractField implements Fieldable, Serializable
* {@link Field.Store stored}. */
public static final Index NO = new Index("NO");
/** Index the field's value so it can be searched. An Analyzer will be used
* to tokenize and possibly further normalize the text before its
* terms will be stored in the index. This is useful for common text.
*/
public static final Index TOKENIZED = new Index("TOKENIZED");
/** Index the tokens produced by running the field's
* value through an Analyzer. This is useful for
* common text. */
public static final Index ANALYZED = new Index("ANALYZED");
/** @deprecated this has been renamed to {@link #ANALYZED} */
public static final Index TOKENIZED = ANALYZED;
/** Index the field's value without using an Analyzer, so it can be searched.
* As no analyzer is used the value will be stored as a single term. This is
* useful for unique Ids like product numbers.
*/
public static final Index UN_TOKENIZED = new Index("UN_TOKENIZED");
public static final Index NOT_ANALYZED = new Index("NOT_ANALYZED");
/** Index the field's value without an Analyzer, and disable
* the storing of norms. No norms means that index-time boosting
* and field length normalization will be disabled. The benefit is
* less memory usage as norms take up one byte per indexed field
* for every document in the index.
* Note that once you index a given field <i>with</i> norms enabled,
* disabling norms will have no effect. In other words, for NO_NORMS
* to have the above described effect on a field, all instances of that
* field must be indexed with NO_NORMS from the beginning.
*/
public static final Index NO_NORMS = new Index("NO_NORMS");
/** @deprecated This has been renamed to {@link #NOT_ANALYZED} */
public static final Index UN_TOKENIZED = NOT_ANALYZED;
/** Expert: Index the field's value without an Analyzer,
* and also disable the storing of norms. Note that you
* can also separately enable/disable norms by calling
* {@link #setOmitNorms}. No norms means that
* index-time field and document boosting and field
* length normalization are disabled. The benefit is
* less memory usage as norms take up one byte of RAM
* per indexed field for every document in the index,
* during searching. Note that once you index a given
* field <i>with</i> norms enabled, disabling norms will
* have no effect. In other words, for this to have the
* above described effect on a field, all instances of
* that field must be indexed with NOT_ANALYZED_NO_NORMS
* from the beginning. */
public static final Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
/** @deprecated This has been renamed to
* {@link #NOT_ANALYZED_NO_NORMS} */
public static final Index NO_NORMS = NOT_ANALYZED_NO_NORMS;
/** Expert: Index the tokens produced by running the
* field's value through an Analyzer, and also
* separately disable the storing of norms. See
* {@link #NOT_ANALYZED_NO_NORMS} for what norms are
* and why you may want to disable them. */
public static final Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
}
/** Specifies whether and how a field should have term vectors. */
@ -284,16 +303,20 @@ public final class Field extends AbstractField implements Fieldable, Serializable
if (index == Index.NO) {
this.isIndexed = false;
this.isTokenized = false;
} else if (index == Index.TOKENIZED) {
} else if (index == Index.ANALYZED) {
this.isIndexed = true;
this.isTokenized = true;
} else if (index == Index.UN_TOKENIZED) {
} else if (index == Index.NOT_ANALYZED) {
this.isIndexed = true;
this.isTokenized = false;
} else if (index == Index.NO_NORMS) {
} else if (index == Index.NOT_ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = false;
this.omitNorms = true;
} else if (index == Index.ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = true;
this.omitNorms = true;
} else {
throw new IllegalArgumentException("unknown index parameter " + index);
}
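Because the deprecated names are declared as plain aliases (TOKENIZED = ANALYZED, UN_TOKENIZED = NOT_ANALYZED, NO_NORMS = NOT_ANALYZED_NO_NORMS), old and new spellings refer to the same Index instances, so reference comparisons like the if/else chain above behave identically for both. A small sketch; it assumes the isTokenized() and getOmitNorms() accessors on AbstractField, which are not shown in this diff:

import org.apache.lucene.document.Field;

public class IndexAliasCheck {
    public static void main(String[] args) {
        // Deprecated constants are the same objects as the renamed ones.
        System.out.println(Field.Index.TOKENIZED == Field.Index.ANALYZED);              // true
        System.out.println(Field.Index.UN_TOKENIZED == Field.Index.NOT_ANALYZED);       // true
        System.out.println(Field.Index.NO_NORMS == Field.Index.NOT_ANALYZED_NO_NORMS);  // true

        // ANALYZED_NO_NORMS tokenizes the value but turns norms off up front,
        // much like constructing with ANALYZED and then calling setOmitNorms(true).
        Field f = new Field("title", "Some Title", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
        System.out.println(f.isTokenized() + " " + f.getOmitNorms());                   // true true
    }
}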

View File

@ -405,9 +405,9 @@ final class FieldsReader {
private Field.Index getIndexType(FieldInfo fi, boolean tokenize) {
Field.Index index;
if (fi.isIndexed && tokenize)
index = Field.Index.TOKENIZED;
index = Field.Index.ANALYZED;
else if (fi.isIndexed && !tokenize)
index = Field.Index.UN_TOKENIZED;
index = Field.Index.NOT_ANALYZED;
else
index = Field.Index.NO;
return index;

View File

@ -57,8 +57,8 @@ import java.io.PrintStream;
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#3f7f5f">//&nbsp;create&nbsp;an&nbsp;index&nbsp;in&nbsp;/tmp/index,&nbsp;overwriting&nbsp;an&nbsp;existing&nbsp;one:</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">IndexModifier&nbsp;indexModifier&nbsp;=&nbsp;</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">IndexModifier</font><font color="#000000">(</font><font color="#2a00ff">&#34;/tmp/index&#34;</font><font color="#000000">,&nbsp;analyzer,&nbsp;</font><font color="#7f0055"><b>true</b></font><font color="#000000">)</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">Document&nbsp;doc&nbsp;=&nbsp;</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Document</font><font color="#000000">()</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">doc.add</font><font color="#000000">(</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Field</font><font color="#000000">(</font><font color="#2a00ff">&#34;id&#34;</font><font color="#000000">,&nbsp;</font><font color="#2a00ff">&#34;1&#34;</font><font color="#000000">,&nbsp;Field.Store.YES,&nbsp;Field.Index.UN_TOKENIZED</font><font color="#000000">))</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">doc.add</font><font color="#000000">(</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Field</font><font color="#000000">(</font><font color="#2a00ff">&#34;body&#34;</font><font color="#000000">,&nbsp;</font><font color="#2a00ff">&#34;a&nbsp;simple&nbsp;test&#34;</font><font color="#000000">,&nbsp;Field.Store.YES,&nbsp;Field.Index.TOKENIZED</font><font color="#000000">))</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">doc.add</font><font color="#000000">(</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Field</font><font color="#000000">(</font><font color="#2a00ff">&#34;id&#34;</font><font color="#000000">,&nbsp;</font><font color="#2a00ff">&#34;1&#34;</font><font color="#000000">,&nbsp;Field.Store.YES,&nbsp;Field.Index.NOT_ANALYZED</font><font color="#000000">))</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">doc.add</font><font color="#000000">(</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Field</font><font color="#000000">(</font><font color="#2a00ff">&#34;body&#34;</font><font color="#000000">,&nbsp;</font><font color="#2a00ff">&#34;a&nbsp;simple&nbsp;test&#34;</font><font color="#000000">,&nbsp;Field.Store.YES,&nbsp;Field.Index.ANALYZED</font><font color="#000000">))</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">indexModifier.addDocument</font><font color="#000000">(</font><font color="#000000">doc</font><font color="#000000">)</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#7f0055"><b>int&nbsp;</b></font><font color="#000000">deleted&nbsp;=&nbsp;indexModifier.delete</font><font color="#000000">(</font><font color="#7f0055"><b>new&nbsp;</b></font><font color="#000000">Term</font><font color="#000000">(</font><font color="#2a00ff">&#34;id&#34;</font><font color="#000000">,&nbsp;</font><font color="#2a00ff">&#34;1&#34;</font><font color="#000000">))</font><font color="#000000">;</font><br/>
<font color="#ffffff">&nbsp;&nbsp;&nbsp;&nbsp;</font><font color="#000000">System.out.println</font><font color="#000000">(</font><font color="#2a00ff">&#34;Deleted&nbsp;&#34;&nbsp;</font><font color="#000000">+&nbsp;deleted&nbsp;+&nbsp;</font><font color="#2a00ff">&#34;&nbsp;document&#34;</font><font color="#000000">)</font><font color="#000000">;</font><br/>
@ -593,8 +593,8 @@ public class IndexModifier {
// create an index in /tmp/index, overwriting an existing one:
IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
Document doc = new Document();
doc.add(new Fieldable("id", "1", Fieldable.Store.YES, Fieldable.Index.UN_TOKENIZED));
doc.add(new Fieldable("body", "a simple test", Fieldable.Store.YES, Fieldable.Index.TOKENIZED));
doc.add(new Fieldable("id", "1", Fieldable.Store.YES, Fieldable.Index.NOT_ANALYZED));
doc.add(new Fieldable("body", "a simple test", Fieldable.Store.YES, Fieldable.Index.ANALYZED));
indexModifier.addDocument(doc);
int deleted = indexModifier.delete(new Term("id", "1"));
System.out.println("Deleted " + deleted + " document");

View File

@ -30,7 +30,7 @@ import java.io.Serializable;
* and does not need to be stored (unless you happen to want it back with the
* rest of your document data). In other words:
*
* <p><code>document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.UN_TOKENIZED));</code></p>
* <p><code>document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));</code></p>
*
*
* <p><h3>Valid Types of Values</h3>
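Sorting needs exactly one indexed term per document in the sort field, which is what NOT_ANALYZED (with no analysis) provides in the line above. A short, self-contained sketch; the class name, field names, and values are illustrative:

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;

public class SortByNumberSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.LIMITED);
        for (int x : new int[] {3, 1, 2}) {
            Document doc = new Document();
            // one term per document, not stored: all that sorting requires
            doc.add(new Field("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));
            writer.addDocument(doc);
        }
        writer.close();

        IndexSearcher searcher = new IndexSearcher(dir);
        TopDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10,
                                       new Sort(new SortField("byNumber", SortField.INT)));
        System.out.println("hits sorted by byNumber: " + docs.totalHits);
        searcher.close();
    }
}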

View File

@ -44,7 +44,7 @@ class SearchTest {
};
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -50,8 +50,8 @@ class SearchTestForDuplicates {
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -56,7 +56,7 @@ public class TestDemo extends LuceneTestCase {
Document doc = new Document();
String text = "This is the text to be indexed.";
doc.add(new Field("fieldname", text, Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
iwriter.addDocument(doc);
iwriter.close();

View File

@ -46,11 +46,11 @@ public class TestHitIterator extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "iterator test doc 1", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "iterator test doc 1", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("field", "iterator test doc 2", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "iterator test doc 2", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();

View File

@ -88,7 +88,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
dir.failOn(new FailOnlyOnMerge());
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

View File

@ -93,7 +93,7 @@ public class TestSearch extends LuceneTestCase {
};
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -93,8 +93,8 @@ public class TestSearchForDuplicates extends LuceneTestCase {
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();

View File

@ -72,7 +72,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
// Force frequent commits
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<7;i++)
writer.addDocument(doc);
IndexCommit cp = (IndexCommit) dp.snapshot();
@ -115,7 +115,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
final Thread t = new Thread() {
public void run() {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
while(System.currentTimeMillis() < stopTime) {
for(int i=0;i<27;i++) {
try {
@ -159,7 +159,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
// final segment, so deletion policy has a chance to
// delete again:
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Make sure we don't have any leftover files in the

View File

@ -56,8 +56,8 @@ class ThreadSafetyTest {
for (int i = 0; i < 1024*ITERATIONS; i++) {
Document d = new Document();
int n = RANDOM.nextInt();
d.add(new Field("id", Integer.toString(n), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(n), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
System.out.println("Adding " + n);
// Switch between single and multiple file segments

View File

@ -43,8 +43,8 @@ public class TestKeywordAnalyzer extends LuceneTestCase {
true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@ -69,10 +69,10 @@ public class TestKeywordAnalyzer extends LuceneTestCase {
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();

View File

@ -121,7 +121,7 @@ public class TestDocument extends LuceneTestCase
public void testConstructorExceptions()
{
new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
new Field("name", "value", Field.Store.NO, Field.Index.UN_TOKENIZED); // okay
new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
try {
new Field("name", "value", Field.Store.NO, Field.Index.NO);
fail();
@ -177,14 +177,14 @@ public class TestDocument extends LuceneTestCase
private Document makeDocumentWithFields()
{
Document doc = new Document();
doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.ANALYZED));
return doc;
}
@ -222,10 +222,10 @@ public class TestDocument extends LuceneTestCase
public void testFieldSetValue() throws Exception {
Field field = new Field("id", "id1", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED);
Document doc = new Document();
doc.add(field);
doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

View File

@ -34,35 +34,35 @@ class DocHelper {
public static final String FIELD_1_TEXT = "field one text";
public static final String TEXT_FIELD_1_KEY = "textField1";
public static Field textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT,
Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO);
Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
public static final String FIELD_2_TEXT = "field field field two text";
//Fields will be lexicographically sorted. So, the order is: field, text, two
public static final int [] FIELD_2_FREQS = {3, 1, 1};
public static final String TEXT_FIELD_2_KEY = "textField2";
public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
public static final String FIELD_2_COMPRESSED_TEXT = "field field field two text";
//Fields will be lexicographically sorted. So, the order is: field, text, two
public static final int [] COMPRESSED_FIELD_2_FREQS = {3, 1, 1};
public static final String COMPRESSED_TEXT_FIELD_2_KEY = "compressedTextField2";
public static Field compressedTextField2 = new Field(COMPRESSED_TEXT_FIELD_2_KEY, FIELD_2_COMPRESSED_TEXT, Field.Store.COMPRESS, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
public static Field compressedTextField2 = new Field(COMPRESSED_TEXT_FIELD_2_KEY, FIELD_2_COMPRESSED_TEXT, Field.Store.COMPRESS, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms";
public static final String TEXT_FIELD_3_KEY = "textField3";
public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.ANALYZED);
static { textField3.setOmitNorms(true); }
public static final String KEYWORD_TEXT = "Keyword";
public static final String KEYWORD_FIELD_KEY = "keyField";
public static Field keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT,
Field.Store.YES, Field.Index.UN_TOKENIZED);
Field.Store.YES, Field.Index.NOT_ANALYZED);
public static final String NO_NORMS_TEXT = "omitNormsText";
public static final String NO_NORMS_KEY = "omitNorms";
public static Field noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT,
Field.Store.YES, Field.Index.NO_NORMS);
Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
public static final String UNINDEXED_FIELD_TEXT = "unindexed field text";
public static final String UNINDEXED_FIELD_KEY = "unIndField";
@ -73,12 +73,12 @@ class DocHelper {
public static final String UNSTORED_1_FIELD_TEXT = "unstored field text";
public static final String UNSTORED_FIELD_1_KEY = "unStoredField1";
public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT,
Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO);
Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO);
public static final String UNSTORED_2_FIELD_TEXT = "unstored field text";
public static final String UNSTORED_FIELD_2_KEY = "unStoredField2";
public static Field unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT,
Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES);
Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES);
public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary";
public static byte [] LAZY_FIELD_BINARY_BYTES;
@ -86,7 +86,7 @@ class DocHelper {
public static final String LAZY_FIELD_KEY = "lazyField";
public static final String LAZY_FIELD_TEXT = "These are some field bytes";
public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField";
public static String LARGE_LAZY_FIELD_TEXT;
@ -96,14 +96,14 @@ class DocHelper {
public static final String FIELD_UTF1_TEXT = "field one \u4e00text";
public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8";
public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT,
Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO);
Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text";
//Fields will be lexicographically sorted. So, the order is: field, text, two
public static final int [] FIELD_UTF2_FREQS = {3, 1, 1};
public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8";
public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
@@ -155,7 +155,7 @@ class DocHelper {
lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES, Field.Store.YES);
fields[fields.length - 2] = lazyFieldBinary;
LARGE_LAZY_FIELD_TEXT = buffer.toString();
largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
fields[fields.length - 1] = largeLazyField;
for (int i=0; i<fields.length; i++) {
Fieldable f = fields[i];


@@ -137,9 +137,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
// Deletes one of the 10 added docs, leaving 9:
@@ -172,9 +172,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
@@ -210,9 +210,9 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
// docs, so 10 pending deletes:
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content", "bbb " + i, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
@@ -434,7 +434,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
@@ -443,7 +443,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new Field("content", "bbb", Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
@@ -507,7 +507,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<60;i++)
writer.addDocument(doc);
writer.setMaxBufferedDocs(200);


@@ -95,8 +95,8 @@ public class TestAtomicUpdate extends LuceneTestCase {
// Update all 100 docs...
for(int i=0; i<100; i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED));
writer.updateDocument(new Term("id", Integer.toString(i)), d);
}
}
@@ -132,8 +132,8 @@ public class TestAtomicUpdate extends LuceneTestCase {
// Establish a base index of 100 docs:
for(int i=0;i<100;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.commit();


@@ -474,11 +474,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}


@@ -37,7 +37,7 @@ public class TestCheckIndex extends LuceneTestCase {
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++) {
writer.addDocument(doc);
}


@@ -68,7 +68,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
writer.setMergeScheduler(cms);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int i=0;i<10;i++) {
for(int j=0;j<20;j++) {
@@ -113,7 +113,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
mp.setMinMergeDocs(1000);
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int i=0;i<10;i++) {
for(int j=0;j<100;j++) {
@@ -154,7 +154,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
for(int j=0;j<21;j++) {
Document doc = new Document();
doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -175,7 +175,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
RAMDirectory directory = new MockRAMDirectory();
Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<2;pass++) {


@@ -41,8 +41,8 @@ public class TestCrash extends LuceneTestCase {
((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("id", "0", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));
for(int i=0;i<157;i++)
writer.addDocument(doc);


@@ -654,7 +654,7 @@ public class TestDeletionPolicy extends LuceneTestCase
private void addDoc(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}


@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.demo.FileDocument;
import java.io.*;
@@ -159,7 +160,6 @@ public class TestDoc extends LuceneTestCase {
assertEquals(multiFileOutput, singleFileOutput);
}
private SegmentInfo indexDoc(IndexWriter writer, String fileName)
throws Exception
{


@@ -116,8 +116,8 @@ public class TestDocumentWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("repeated", "repeated one", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.flush();
@@ -172,7 +172,7 @@ public class TestDocumentWriter extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.flush();
@@ -243,11 +243,11 @@ public class TestDocumentWriter extends LuceneTestCase {
public void testMixedTermVectorSettingsSameField() throws Exception {
Document doc = new Document();
// f1 first without tv then with tv
doc.add(new Field("f1", "v1", Store.YES, Index.UN_TOKENIZED, TermVector.NO));
doc.add(new Field("f1", "v2", Store.YES, Index.UN_TOKENIZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
doc.add(new Field("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// f2 first with tv then without tv
doc.add(new Field("f2", "v1", Store.YES, Index.UN_TOKENIZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f2", "v2", Store.YES, Index.UN_TOKENIZED, TermVector.NO));
doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
RAMDirectory ram = new RAMDirectory();
IndexWriter writer = new IndexWriter(ram, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);


@@ -97,15 +97,15 @@ public class TestFilterIndexReader extends LuceneTestCase {
IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("default","one two", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("default","one two", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("default","one three", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("default","one three", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d2);
Document d3 = new Document();
d3.add(new Field("default","two four", Field.Store.YES, Field.Index.TOKENIZED));
d3.add(new Field("default","two four", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d3);
writer.close();


@@ -189,8 +189,8 @@ public class TestIndexFileDeleter extends LuceneTestCase
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
}


@@ -127,8 +127,8 @@ public class TestIndexModifier extends LuceneTestCase {
private Document getDoc() {
Document doc = new Document();
doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.NOT_ANALYZED));
docCount++;
return doc;
}
@@ -272,15 +272,15 @@ class IndexThread extends Thread {
Document doc = new Document();
synchronized (getClass()) {
doc.add(new Field("id", Integer.toString(id), Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
id++;
}
// add random stuff:
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
Field.Index.TOKENIZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.ANALYZED));
return doc;
}


@@ -190,11 +190,11 @@ public class TestIndexReader extends LuceneTestCase
// new termvector fields
for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
Document doc = new Document();
doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
@@ -773,8 +773,8 @@ public class TestIndexReader extends LuceneTestCase
IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<157;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();
@@ -1170,31 +1170,31 @@ public class TestIndexReader extends LuceneTestCase
private void addDocumentWithFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("text","test1", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
@@ -1202,7 +1202,7 @@ public class TestIndexReader extends LuceneTestCase
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void rmDir(File dir) {


@@ -157,7 +157,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
for (int i=0; i<4; i++) {
for (int j=0; j<M; j++) {
Document doc = new Document();
doc.add(new Field("id", i+"_"+j, Store.YES, Index.UN_TOKENIZED));
doc.add(new Field("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED));
iwriter.addDocument(doc);
if (i>0) {
int k = i-1;
@@ -884,11 +884,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {
Document doc = new Document();
sb.append("a");
sb.append(n);
doc.add(new Field("field1", sb.toString(), Store.YES, Index.TOKENIZED));
doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED));
sb.append(" b");
sb.append(n);
for (int i = 1; i < numFields; i++) {
doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.TOKENIZED));
doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED));
}
return doc;
}


@@ -129,15 +129,15 @@ public class TestIndexWriter extends LuceneTestCase
private static void addDoc(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -566,12 +566,12 @@ public class TestIndexWriter extends LuceneTestCase
// Max length term is 16383, so this contents produces
// a too-long term:
String contents = "abc xyz x" + bigTerm + " another term";
doc.add(new Field("content", contents, Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -599,7 +599,7 @@ public class TestIndexWriter extends LuceneTestCase
// Make sure we can add a document with exactly the
// maximum length term, and search on that term:
doc = new Document();
doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
StandardAnalyzer sa = new StandardAnalyzer();
sa.setMaxTokenLength(100000);
writer = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
@@ -617,7 +617,7 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -655,7 +655,7 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -1245,12 +1245,12 @@ public class TestIndexWriter extends LuceneTestCase
writer.setMaxBufferedDocs(10);
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
@@ -1277,7 +1277,7 @@ public class TestIndexWriter extends LuceneTestCase
int lastNumFile = dir.list().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.list().length;
// Verify that with a tiny RAM buffer we see new
@@ -1300,7 +1300,7 @@ public class TestIndexWriter extends LuceneTestCase
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
@@ -1354,7 +1354,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int j=1;j<52;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -1416,7 +1416,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
@@ -1425,7 +1425,7 @@ public class TestIndexWriter extends LuceneTestCase
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -1440,7 +1440,7 @@ public class TestIndexWriter extends LuceneTestCase
String longTerm = b.toString();
Document doc = new Document();
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
@@ -1461,7 +1461,7 @@ public class TestIndexWriter extends LuceneTestCase
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
@@ -1482,7 +1482,7 @@ public class TestIndexWriter extends LuceneTestCase
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
@@ -1514,7 +1514,7 @@ public class TestIndexWriter extends LuceneTestCase
b.append(" a a a a a a a a");
}
Document doc = new Document();
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
@@ -1571,7 +1571,7 @@ public class TestIndexWriter extends LuceneTestCase
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
writer.flush(false, true, true);
@@ -1589,7 +1589,7 @@ public class TestIndexWriter extends LuceneTestCase
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.flush();
writer.addDocument(new Document());
@@ -1609,7 +1609,7 @@ public class TestIndexWriter extends LuceneTestCase
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeScheduler(new ConcurrentMergeScheduler());
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(101);
for(int i=0;i<200;i++)
@@ -1663,7 +1663,7 @@ public class TestIndexWriter extends LuceneTestCase
IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
ir.addDocument(document);
ir.close();
@@ -1675,17 +1675,17 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.flush();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@@ -1701,13 +1701,13 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.flush();
document = new Document();
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
@@ -1715,7 +1715,7 @@ public class TestIndexWriter extends LuceneTestCase
iw.optimize();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@@ -1733,7 +1733,7 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.setMaxBufferedDocs(2);
iw.setMergeFactor(2);
@@ -1775,7 +1775,7 @@ public class TestIndexWriter extends LuceneTestCase
iw.setMaxBufferedDocs(2);
iw.setMergeFactor(2);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
@@ -1805,7 +1805,7 @@ public class TestIndexWriter extends LuceneTestCase
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
@@ -1815,13 +1815,13 @@ public class TestIndexWriter extends LuceneTestCase
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -1887,7 +1887,7 @@ public class TestIndexWriter extends LuceneTestCase
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
@@ -1939,13 +1939,13 @@ public class TestIndexWriter extends LuceneTestCase
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
@@ -1955,7 +1955,7 @@ public class TestIndexWriter extends LuceneTestCase
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
@@ -1982,7 +1982,7 @@ public class TestIndexWriter extends LuceneTestCase
writer.setMaxBufferedDocs(10);
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
@@ -2034,13 +2034,13 @@ public class TestIndexWriter extends LuceneTestCase
for(int iter=0;iter<NUM_ITER;iter++) {
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
@@ -2050,7 +2050,7 @@ public class TestIndexWriter extends LuceneTestCase
if (0 == finalI) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
@@ -2100,7 +2100,7 @@ public class TestIndexWriter extends LuceneTestCase
writer.setMaxBufferedDocs(10);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
@@ -2140,21 +2140,21 @@ public class TestIndexWriter extends LuceneTestCase
if (i == 7) {
// Add empty docs here
doc.add(new Field("content3", "", Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
} else {
Field.Store storeVal;
if (i%2 == 0) {
doc.add(new Field("content4", contents, Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
storeVal = Field.Store.YES;
} else
storeVal = Field.Store.NO;
doc.add(new Field("content1", contents, storeVal,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("content3", "", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("content5", "", storeVal,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
}
for(int j=0;j<4;j++)
@@ -2178,7 +2178,7 @@ public class TestIndexWriter extends LuceneTestCase
RAMDirectory directory = new MockRAMDirectory();
final Document doc = new Document();
Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
for(int pass=0;pass<3;pass++) {
@@ -2287,7 +2287,7 @@ public class TestIndexWriter extends LuceneTestCase
public void run() {
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
int idUpto = 0;
int fullCount = 0;
@@ -2414,7 +2414,7 @@ public class TestIndexWriter extends LuceneTestCase
dir.setMaxSizeInBytes(dir.getRecomputedActualSizeInBytes());
writer.setMaxBufferedDocs(2);
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit disk full");
@@ -2516,7 +2516,7 @@ public class TestIndexWriter extends LuceneTestCase
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<6;i++)
writer.addDocument(doc);
@@ -2721,7 +2721,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -2876,7 +2876,7 @@ public class TestIndexWriter extends LuceneTestCase
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
@@ -2930,7 +2930,7 @@ public class TestIndexWriter extends LuceneTestCase
document = new Document();
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
writer.addDocument(document);
@@ -2964,7 +2964,7 @@ public class TestIndexWriter extends LuceneTestCase
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<10;i++)
@@ -3004,7 +3004,7 @@ public class TestIndexWriter extends LuceneTestCase
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -3032,7 +3032,7 @@ public class TestIndexWriter extends LuceneTestCase
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<10;i++)
@@ -3079,7 +3079,7 @@ public class TestIndexWriter extends LuceneTestCase
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
@@ -3126,7 +3126,7 @@ public class TestIndexWriter extends LuceneTestCase
Field.Index.NO);
document.add(storedField);
Field termVectorField = new Field("termVector", "termVector",
Field.Store.NO, Field.Index.UN_TOKENIZED,
Field.Store.NO, Field.Index.NOT_ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS);
document.add(termVectorField);
for(int i=0;i<98;i++)
@@ -3160,7 +3160,7 @@ public class TestIndexWriter extends LuceneTestCase
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
Document doc = new Document();
doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
}
@@ -3186,7 +3186,7 @@ public class TestIndexWriter extends LuceneTestCase
MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
w.addDocument(doc);
w.doFail = true;
try {
@@ -3207,7 +3207,7 @@ public class TestIndexWriter extends LuceneTestCase
w.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
w.addDocument(doc);
Analyzer analyzer = new Analyzer() {
@@ -3218,7 +3218,7 @@ public class TestIndexWriter extends LuceneTestCase
Document crashDoc = new Document();
crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
try {
w.addDocument(crashDoc, analyzer);
fail("did not hit expected exception");
@@ -3258,7 +3258,7 @@ public class TestIndexWriter extends LuceneTestCase
w.setMergeScheduler(new ConcurrentMergeScheduler());
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
for(int i=0;i<10;i++)
try {
w.addDocument(doc);
@@ -3291,7 +3291,7 @@ public class TestIndexWriter extends LuceneTestCase
MockIndexWriter3 w = new MockIndexWriter3(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
assertTrue(w.wasCalled);
@@ -3343,7 +3343,7 @@ public class TestIndexWriter extends LuceneTestCase
IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
w.addDocument(doc);
dir.failOn(failure);
try {
@@ -3393,7 +3393,7 @@ public class TestIndexWriter extends LuceneTestCase
final int count = utf8Data.length/2;
for(int i=0;i<count;i++)
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.close();
@@ -4093,7 +4093,7 @@ public class TestIndexWriter extends LuceneTestCase
Document doc = new Document();
doc.add(new Field("test1", "this is some data that will be compressed this this this", Field.Store.COMPRESS, Field.Index.NO));
doc.add(new Field("test2", new byte[20], Field.Store.COMPRESS));
doc.add(new Field("field" + i, "random field", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("field" + i, "random field", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
} finally {
w.close();


@@ -52,14 +52,14 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc
.add(new Field("city", text[i], Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
modifier.addDocument(doc);
}
modifier.optimize();
@@ -267,11 +267,11 @@ public class TestIndexWriterDelete extends LuceneTestCase {
private void addDoc(IndexWriter modifier, int id, int value)
throws IOException {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("id", String.valueOf(id), Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
doc.add(new Field("value", String.valueOf(value), Field.Store.NO,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
modifier.addDocument(doc);
}
@@ -311,9 +311,9 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < 157; i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
d.add(new Field("content", "aaa " + i, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();
@@ -383,9 +383,9 @@ public class TestIndexWriterDelete extends LuceneTestCase {
if (updates) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
d.add(new Field("content", "bbb " + i, Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
} else { // deletes
modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
@@ -546,13 +546,13 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("city", text[i], Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
modifier.addDocument(doc);
}
// flush (and commit if ac)
@@ -654,13 +654,13 @@ public class TestIndexWriterDelete extends LuceneTestCase {
for (int i = 0; i < keywords.length; i++) {
Document doc = new Document();
doc.add(new Field("id", keywords[i], Field.Store.YES,
Field.Index.UN_TOKENIZED));
Field.Index.NOT_ANALYZED));
doc.add(new Field("country", unindexed[i], Field.Store.YES,
Field.Index.NO));
doc.add(new Field("contents", unstored[i], Field.Store.NO,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
doc.add(new Field("city", text[i], Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
try {
modifier.addDocument(doc);
} catch (IOException io) {


@@ -49,17 +49,17 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
final Document doc = new Document();
doc.add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
doc.add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.UN_TOKENIZED));
doc.add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
final Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
final Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
final long stopTime = System.currentTimeMillis() + 3000;


@@ -199,7 +199,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
private void addDoc(IndexWriter writer) throws IOException {
Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@@ -99,7 +99,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
for (int i = start; i < (start + numDocs); i++)
{
Document temp = new Document();
temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.UN_TOKENIZED));
temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(temp);
}


@@ -78,7 +78,7 @@ public class TestLazyBug extends LuceneTestCase {
data[f % data.length]
+ '#' + data[r.nextInt(data.length)],
Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
}
writer.addDocument(doc);
}


@@ -64,7 +64,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
content = this.term3 + " " + this.term2;
}
doc.add(new Field(this.field, content, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -111,7 +111,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 10; i++) {
Document doc = new Document();
doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}


@@ -50,7 +50,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
Term term = new Term("test", "a");
for (int i = 0; i < 5000; i++) {
Document d1 = new Document();
d1.add(new Field(term.field(), term.text(), Store.NO, Index.TOKENIZED));
d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
writer.addDocument(d1);
}
writer.flush();


@@ -152,7 +152,7 @@ public class TestMultiSegmentReader extends LuceneTestCase {
private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(), create, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();
}


@@ -210,7 +210,7 @@ public class TestNorms extends LuceneTestCase {
Document d = new Document();
float boost = nextNorm();
for (int i = 0; i < 10; i++) {
Field f = new Field("f"+i,"v"+i,Store.NO,Index.UN_TOKENIZED);
Field f = new Field("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
f.setBoost(boost);
d.add(f);
}


@@ -60,11 +60,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
f2.setOmitTf(true);
d.add(f2);
@@ -109,11 +109,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
f2.setOmitTf(true);
d.add(f2);
@@ -163,11 +163,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
// this field will have Tf
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f1);
// this field will NOT have Tf
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
d.add(f2);
for(int i=0;i<5;i++)
@@ -212,7 +212,7 @@ public class TestOmitTf extends LuceneTestCase {
writer.setUseCompoundFile(false);
Document d = new Document();
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
f1.setOmitTf(true);
d.add(f1);
@@ -249,11 +249,11 @@ public class TestOmitTf extends LuceneTestCase {
Document d = new Document();
sb.append(term).append(" ");
String content = sb.toString();
Field noTf = new Field("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.TOKENIZED);
Field noTf = new Field("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
noTf.setOmitTf(true);
d.add(noTf);
Field tf = new Field("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.TOKENIZED);
Field tf = new Field("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.ANALYZED);
d.add(tf);
writer.addDocument(d);


@@ -107,7 +107,7 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir2 = new MockRAMDirectory();
IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d3);
w2.close();
@ -152,13 +152,13 @@ public class TestParallelReader extends LuceneTestCase {
// add another document to ensure that the indexes are not optimized
IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
modifier.close();
modifier = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
d = new Document();
d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
modifier.close();
@ -215,16 +215,16 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir = new MockRAMDirectory();
IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f4", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f3", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f4", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(d2);
w.close();
@ -245,12 +245,12 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir1 = new MockRAMDirectory();
IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
w1.addDocument(d1);
Document d2 = new Document();
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
w1.addDocument(d2);
w1.close();
return dir1;
@ -260,12 +260,12 @@ public class TestParallelReader extends LuceneTestCase {
Directory dir2 = new RAMDirectory();
IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.TOKENIZED));
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d3);
Document d4 = new Document();
d4.add(new Field("f3", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d4.add(new Field("f4", "v2", Field.Store.YES, Field.Index.TOKENIZED));
d4.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
d4.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d4);
w2.close();
return dir2;


@ -42,10 +42,10 @@ public class TestParallelTermEnum extends LuceneTestCase {
doc = new Document();
doc.add(new Field("field1", "the quick brown fox jumps", Store.YES,
Index.TOKENIZED));
Index.ANALYZED));
doc.add(new Field("field2", "the quick brown fox jumps", Store.YES,
Index.TOKENIZED));
doc.add(new Field("field4", "", Store.NO, Index.TOKENIZED));
Index.ANALYZED));
doc.add(new Field("field4", "", Store.NO, Index.ANALYZED));
iw1.addDocument(doc);
iw1.close();
@ -53,11 +53,11 @@ public class TestParallelTermEnum extends LuceneTestCase {
IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
doc.add(new Field("field0", "", Store.NO, Index.TOKENIZED));
doc.add(new Field("field0", "", Store.NO, Index.ANALYZED));
doc.add(new Field("field1", "the fox jumps over the lazy dog",
Store.YES, Index.TOKENIZED));
Store.YES, Index.ANALYZED));
doc.add(new Field("field3", "the fox jumps over the lazy dog",
Store.YES, Index.TOKENIZED));
Store.YES, Index.ANALYZED));
iw2.addDocument(doc);
iw2.close();


@ -100,15 +100,15 @@ public class TestPayloads extends LuceneTestCase {
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
// this field won't have any payloads
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
// this field will have payloads in all docs, however not for all term positions,
// so this field is used to check if the DocumentWriter correctly enables the payloads bit
// even if only some term positions have payloads
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
// this field is used to verify if the SegmentMerger enables payloads for a field if it has payloads
// enabled in only some documents
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
// only add payload data for field f2
analyzer.setPayloadData("f2", 1, "somedata".getBytes(), 0, 1);
writer.addDocument(d);
@ -127,10 +127,10 @@ public class TestPayloads extends LuceneTestCase {
// enabled payloads for that field
writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
d = new Document();
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("f1", "This field has no payloads", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f2", "This field has payloads in all docs", Field.Store.NO, Field.Index.ANALYZED));
d.add(new Field("f3", "This field has payloads in some docs", Field.Store.NO, Field.Index.ANALYZED));
// add payload data for field f2 and f3
analyzer.setPayloadData("f2", "somedata".getBytes(), 0, 1);
analyzer.setPayloadData("f3", "somedata".getBytes(), 0, 3);
@ -189,7 +189,7 @@ public class TestPayloads extends LuceneTestCase {
byte[] payloadData = generateRandomData(payloadDataLength);
Document d = new Document();
d.add(new Field(fieldName, content, Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field(fieldName, content, Field.Store.NO, Field.Index.ANALYZED));
// add the same document multiple times to have the same payload lengths for all
// occurrences within two consecutive skip intervals
int offset = 0;
@ -307,7 +307,7 @@ public class TestPayloads extends LuceneTestCase {
String singleTerm = "lucene";
d = new Document();
d.add(new Field(fieldName, singleTerm, Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field(fieldName, singleTerm, Field.Store.NO, Field.Index.ANALYZED));
// add a payload whose length is greater than the buffer size of BufferedIndexOutput
payloadData = generateRandomData(2000);
analyzer.setPayloadData(fieldName, payloadData, 100, 1500);


@ -258,7 +258,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}


@ -123,7 +123,7 @@ public class TestSegmentTermEnum extends LuceneTestCase
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}


@ -82,8 +82,8 @@ public class TestStressIndexing extends LuceneTestCase {
for(int j=0; j<10; j++) {
Document d = new Document();
int n = RANDOM.nextInt();
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}


@ -485,7 +485,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
ArrayList fields = new ArrayList();
String idString = getIdString();
Field idField = new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NO_NORMS);
Field idField = new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
fields.add(idField);
int nFields = nextInt(maxFields);
@ -509,16 +509,16 @@ public class TestStressIndexing2 extends LuceneTestCase {
switch (nextInt(4)) {
case 0:
fields.add(new Field("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NO_NORMS, tvVal));
fields.add(new Field("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
break;
case 1:
fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.TOKENIZED, tvVal));
fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
break;
case 2:
fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
break;
case 3:
fields.add(new Field("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.TOKENIZED, tvVal));
fields.add(new Field("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
break;
}
}


@ -102,7 +102,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
tv = Field.TermVector.WITH_OFFSETS;
else
tv = Field.TermVector.YES;
doc.add(new Field(testFields[i], "", Field.Store.NO, Field.Index.TOKENIZED, tv));
doc.add(new Field(testFields[i], "", Field.Store.NO, Field.Index.ANALYZED, tv));
}
//Create 5 documents for testing, they all have the same


@ -65,7 +65,7 @@ public class TestTermdocPerf extends LuceneTestCase {
};
Document doc = new Document();
doc.add(new Field(field,val, Field.Store.NO, Field.Index.NO_NORMS));
doc.add(new Field(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(100);
writer.setMergeFactor(100);


@ -65,8 +65,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
for(int i=0;i<200;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
@ -87,8 +87,8 @@ public class TestThreadedOptimize extends LuceneTestCase {
writerFinal.optimize(false);
for(int k=0;k<17*(1+iFinal);k++) {
Document d = new Document();
d.add(new Field("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
writerFinal.addDocument(d);
}
for(int k=0;k<9*(1+iFinal);k++)


@ -132,8 +132,8 @@ public class TestTransactions extends LuceneTestCase
for(int j=0; j<10; j++) {
Document d = new Document();
int n = RANDOM.nextInt();
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
@ -176,7 +176,7 @@ public class TestTransactions extends LuceneTestCase
for(int j=0; j<7; j++) {
Document d = new Document();
int n = RANDOM.nextInt();
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();


@ -69,7 +69,7 @@ public class TestRAMDirectory extends LuceneTestCase {
Document doc = null;
for (int i = 0; i < docsToAdd; i++) {
doc = new Document();
doc.add(new Field("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
assertEquals(docsToAdd, writer.docCount());
@ -173,7 +173,7 @@ public class TestRAMDirectory extends LuceneTestCase {
public void run() {
for (int j=1; j<docsPerThread; j++) {
Document doc = new Document();
doc.add(new Field("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
try {
writer.addDocument(doc);
} catch (IOException e) {


@ -287,7 +287,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();


@ -917,10 +917,10 @@ public class TestQueryParser extends LuceneTestCase {
private static void addDateDoc(String content, int year, int month,
int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(new Field("f", content, Field.Store.YES, Field.Index.TOKENIZED));
d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance();
cal.set(year, month-1, day, hour, minute, second);
d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.UN_TOKENIZED));
d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}


@ -80,7 +80,7 @@ public class BaseTestRangeFilter extends LuceneTestCase {
for (int d = minId; d <= maxId; d++) {
Document doc = new Document();
doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.NOT_ANALYZED));
int r= rand.nextInt();
if (maxR < r) {
maxR = r;
@ -88,8 +88,8 @@ public class BaseTestRangeFilter extends LuceneTestCase {
if (r < minR) {
minR = r;
}
doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body","body", Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body","body", Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}


@ -49,7 +49,7 @@ public class TestBoolean2 extends LuceneTestCase {
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.TOKENIZED));
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();


@ -65,10 +65,10 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("data",data[i]));
doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
}
writer.addDocument(doc);
}


@ -148,12 +148,12 @@ public class TestBooleanOr extends LuceneTestCase {
FIELD_T,
"Optimize not deleting all files",
Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
d.add(new Field(
FIELD_C,
"Deleted When I run an optimize in our production environment.",
Field.Store.YES,
Field.Index.TOKENIZED));
Field.Index.ANALYZED));
//
writer.addDocument(d);


@ -68,7 +68,7 @@ public class TestBooleanPrefixQuery extends LuceneTestCase {
WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
writer.close();


@ -50,7 +50,7 @@ public class TestBooleanScorer extends LuceneTestCase
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < values.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
writer.close();


@ -72,10 +72,10 @@ public class TestConstantScoreRangeQuery extends BaseTestRangeFilter {
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));
doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
doc.add(new Field("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
if (null != data[i]) {
doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("data",data[i]));
doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
}
writer.addDocument(doc);
}


@ -76,13 +76,13 @@ implements Serializable {
for (int i=0; i<INDEX_SIZE; ++i) { // don't decrease; if too low the problem doesn't show up
Document doc = new Document();
if((i%5)!=0) { // some documents must not have an entry in the first sort field
doc.add (new Field("publicationDate_", random.getLuceneDate(), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add (new Field("publicationDate_", random.getLuceneDate(), Field.Store.YES, Field.Index.NOT_ANALYZED));
}
if((i%7)==0) { // some documents to match the query (see below)
doc.add (new Field("content", "test", Field.Store.YES, Field.Index.TOKENIZED));
doc.add (new Field("content", "test", Field.Store.YES, Field.Index.ANALYZED));
}
// every document has a defined 'mandant' field
doc.add(new Field("mandant", Integer.toString(i%3), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("mandant", Integer.toString(i%3), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument (doc);
}
writer.optimize ();


@ -56,8 +56,8 @@ public class TestDateFilter
Document doc = new Document();
// add time that is in the past
doc.add(new Field("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
@ -117,8 +117,8 @@ public class TestDateFilter
Document doc = new Document();
// add time that is in the future
doc.add(new Field("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.TOKENIZED));
doc.add(new Field("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();


@ -103,13 +103,13 @@ public class TestDateSort extends TestCase {
Document document = new Document();
// Add the text field.
Field textField = new Field(TEXT_FIELD, text, Field.Store.YES, Field.Index.TOKENIZED);
Field textField = new Field(TEXT_FIELD, text, Field.Store.YES, Field.Index.ANALYZED);
document.add(textField);
// Add the date/time field.
String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
Field dateTimeField = new Field(DATE_TIME_FIELD, dateTimeString, Field.Store.YES,
Field.Index.UN_TOKENIZED);
Field.Index.NOT_ANALYZED);
document.add(dateTimeField);
return document;


@ -86,38 +86,38 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
// d1 is an "ok" match for: albino elephant
{
Document d1 = new Document();
d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d1"));
d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "elephant"));
d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d1"));
d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
writer.addDocument(d1);
}
// d2 is a "good" match for: albino elephant
{
Document d2 = new Document();
d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d2"));
d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "albino"));
d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "elephant"));
d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d2"));
d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
writer.addDocument(d2);
}
// d3 is a "better" match for: albino elephant
{
Document d3 = new Document();
d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d3"));
d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "albino"));
d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d3"));
d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
writer.addDocument(d3);
}
// d4 is the "best" match for: albino elephant
{
Document d4 = new Document();
d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d4"));
d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "albino"));
d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "albino"));
d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d4"));
d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
writer.addDocument(d4);
}

Some files were not shown because too many files have changed in this diff.