mirror of https://github.com/apache/lucene.git

LUCENE-1961: Remove remaining deprecations from document package.

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@823252 13f79535-47bb-0310-9956-ffa450edef68

parent 3f418a0bba
commit 84b2c6ecaa
@@ -36,6 +36,9 @@ API Changes
 
 * LUCENE-1960: Remove deprecated Field.Store.COMPRESS. (Michael Busch)
 
+* LUCENE-1961: Remove remaining deprecations from document package.
+  (Michael Busch)
+
 Bug fixes
 
 New features
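For upgraders, the deprecated names removed here map onto their replacements as follows: Field.Index.TOKENIZED -> ANALYZED, Field.Index.UN_TOKENIZED -> NOT_ANALYZED, Field.Index.NO_NORMS -> NOT_ANALYZED_NO_NORMS, setOmitTf()/getOmitTf() -> setOmitTermFreqAndPositions()/getOmitTermFreqAndPositions(), binaryValue() -> getBinaryValue(), Document.fields() -> Document.getFields(), and Field.setValue(TokenStream) -> Field.setTokenStream(TokenStream). A minimal sketch of the post-removal idiom (field names and values are illustrative, not from this commit):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;

    public class MigrationSketch {
      public static Document exampleDoc() {
        Document doc = new Document();
        // was Field.Index.TOKENIZED
        doc.add(new Field("title", "some analyzed text", Field.Store.YES, Field.Index.ANALYZED));
        // was Field.Index.UN_TOKENIZED
        doc.add(new Field("id", "X-42", Field.Store.YES, Field.Index.NOT_ANALYZED));
        // was Field.Index.NO_NORMS
        doc.add(new Field("tier", "t0", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
        return doc;
      }
    }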
@@ -595,7 +595,7 @@ public class TestQPHelper extends LocalizedTestCase {
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
-        Field.Index.UN_TOKENIZED));
+        Field.Index.NOT_ANALYZED));
     iw.addDocument(doc);
     iw.close();
     IndexSearcher is = new IndexSearcher(ramDir, true);
@@ -591,7 +591,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
-        Field.Index.UN_TOKENIZED));
+        Field.Index.NOT_ANALYZED));
     iw.addDocument(doc);
     iw.close();
     IndexSearcher is = new IndexSearcher(ramDir, true);
@@ -96,14 +96,14 @@ public class TestCartesian extends TestCase{
 
     Document doc = new Document();
 
-    doc.add(new Field("name", name,Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("name", name,Field.Store.YES, Field.Index.ANALYZED));
 
     // convert the lat / long to lucene fields
-    doc.add(new Field(latField, NumericUtils.doubleToPrefixCoded(lat),Field.Store.YES, Field.Index.UN_TOKENIZED));
-    doc.add(new Field(lngField, NumericUtils.doubleToPrefixCoded(lng),Field.Store.YES, Field.Index.UN_TOKENIZED));
+    doc.add(new Field(latField, NumericUtils.doubleToPrefixCoded(lat),Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field(lngField, NumericUtils.doubleToPrefixCoded(lng),Field.Store.YES, Field.Index.NOT_ANALYZED));
 
     // add a default meta field to make searching all documents easy
-    doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
 
     int ctpsize = ctps.size();
     for (int i =0; i < ctpsize; i++){
@@ -111,11 +111,11 @@ public class TestCartesian extends TestCase{
       doc.add(new Field(ctp.getTierFieldName(),
           NumericUtils.doubleToPrefixCoded(ctp.getTierBoxId(lat,lng)),
           Field.Store.YES,
-          Field.Index.NO_NORMS));
+          Field.Index.NOT_ANALYZED_NO_NORMS));
 
       doc.add(new Field(geoHashPrefix, GeoHashUtils.encode(lat,lng),
           Field.Store.YES,
-          Field.Index.NO_NORMS));
+          Field.Index.NOT_ANALYZED_NO_NORMS));
     }
     writer.addDocument(doc);
 
@@ -63,14 +63,14 @@ public class TestDistance extends TestCase{
 
     Document doc = new Document();
 
-    doc.add(new Field("name", name,Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("name", name,Field.Store.YES, Field.Index.ANALYZED));
 
     // convert the lat / long to lucene fields
-    doc.add(new Field(latField, NumericUtils.doubleToPrefixCoded(lat),Field.Store.YES, Field.Index.UN_TOKENIZED));
-    doc.add(new Field(lngField, NumericUtils.doubleToPrefixCoded(lng),Field.Store.YES, Field.Index.UN_TOKENIZED));
+    doc.add(new Field(latField, NumericUtils.doubleToPrefixCoded(lat),Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field(lngField, NumericUtils.doubleToPrefixCoded(lng),Field.Store.YES, Field.Index.NOT_ANALYZED));
 
     // add a default meta field to make searching all documents easy
-    doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
 
   }
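A side note on the spatial tests above: values produced by NumericUtils.doubleToPrefixCoded(...) must be indexed NOT_ANALYZED (and tier ids NOT_ANALYZED_NO_NORMS), because a prefix-coded value is only searchable if it reaches the index as a single, unmodified token. A minimal sketch of the idiom, with field names assumed for illustration:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.util.NumericUtils;

    public class GeoFieldSketch {
      public static Document geoDoc(double lat, double lng) {
        Document doc = new Document();
        // each prefix-coded double is indexed as one opaque term
        doc.add(new Field("lat", NumericUtils.doubleToPrefixCoded(lat),
            Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field("lng", NumericUtils.doubleToPrefixCoded(lng),
            Field.Store.YES, Field.Index.NOT_ANALYZED));
        return doc;
      }
    }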
@@ -257,9 +257,6 @@ public abstract class AbstractField implements Fieldable {
   /** True if norms are omitted for this indexed field */
   public boolean getOmitNorms() { return omitNorms; }
 
-  /** @deprecated Renamed to {@link #getOmitTermFreqAndPositions} */
-  public boolean getOmitTf() { return omitTermFreqAndPositions; }
-
   /** @see #setOmitTermFreqAndPositions */
   public boolean getOmitTermFreqAndPositions() { return omitTermFreqAndPositions; }
 
@@ -270,9 +267,6 @@ public abstract class AbstractField implements Fieldable {
    */
   public void setOmitNorms(boolean omitNorms) { this.omitNorms=omitNorms; }
 
-  /** @deprecated Renamed to {@link #setOmitTermFreqAndPositions} */
-  public void setOmitTf(boolean omitTermFreqAndPositions) { this.omitTermFreqAndPositions=omitTermFreqAndPositions; }
-
   /** Expert:
    *
    * If set, omit term freq, positions and payloads from
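With the deprecated pair gone from AbstractField, callers move to the renamed accessors directly. A minimal sketch (field name and text are illustrative):

    import org.apache.lucene.document.Field;

    public class OmitTfSketch {
      public static Field bodyField(String text) {
        Field body = new Field("body", text, Field.Store.NO, Field.Index.ANALYZED);
        body.setOmitTermFreqAndPositions(true); // formerly setOmitTf(true)
        return body;
      }
    }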
@@ -164,21 +164,6 @@ public final class Document implements java.io.Serializable {
     return null;
   }
 
-  /** Returns an Enumeration of all the fields in a document.
-   * @deprecated use {@link #getFields()} instead
-   */
-  public final Enumeration<Fieldable> fields() {
-    return new Enumeration<Fieldable>() {
-      final Iterator<Fieldable> iter = fields.iterator();
-      public boolean hasMoreElements() {
-        return iter.hasNext();
-      }
-      public Fieldable nextElement() {
-        return iter.next();
-      }
-    };
-  }
-
   /** Returns a List of all the fields in a document.
    * <p>Note that fields which are <i>not</i> {@link Fieldable#isStored() stored} are
    * <i>not</i> available in documents retrieved from the
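Callers that still walk the removed Enumeration switch to the List returned by getFields(), as the deprecation note suggested. A minimal sketch:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Fieldable;

    public class FieldsIterationSketch {
      public static void printFieldNames(Document doc) {
        // formerly: Enumeration<Fieldable> e = doc.fields();
        for (Fieldable f : doc.getFields()) {
          System.out.println(f.name());
        }
      }
    }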
@@ -277,7 +262,7 @@ public final class Document implements java.io.Serializable {
     List<byte[]> result = new ArrayList<byte[]>();
     for (Fieldable field : fields) {
       if (field.name().equals(name) && (field.isBinary()))
-        result.add(field.binaryValue());
+        result.add(field.getBinaryValue());
     }
 
     if (result.size() == 0)
@@ -298,7 +283,7 @@ public final class Document implements java.io.Serializable {
   public final byte[] getBinaryValue(String name) {
     for (Fieldable field : fields) {
       if (field.name().equals(name) && (field.isBinary()))
-        return field.binaryValue();
+        return field.getBinaryValue();
     }
     return null;
   }
@@ -70,18 +70,12 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
      * common text. */
     public static final Index ANALYZED = new Index("ANALYZED");
 
-    /** @deprecated this has been renamed to {@link #ANALYZED} */
-    public static final Index TOKENIZED = ANALYZED;
-
     /** Index the field's value without using an Analyzer, so it can be searched.
      * As no analyzer is used the value will be stored as a single term. This is
      * useful for unique Ids like product numbers.
      */
     public static final Index NOT_ANALYZED = new Index("NOT_ANALYZED");
 
-    /** @deprecated This has been renamed to {@link #NOT_ANALYZED} */
-    public static final Index UN_TOKENIZED = NOT_ANALYZED;
-
     /** Expert: Index the field's value without an Analyzer,
      * and also disable the storing of norms. Note that you
      * can also separately enable/disable norms by calling
@@ -98,10 +92,6 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
      * from the beginning. */
     public static final Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
 
-    /** @deprecated This has been renamed to
-     * {@link #NOT_ANALYZED_NO_NORMS} */
-    public static final Index NO_NORMS = NOT_ANALYZED_NO_NORMS;
-
     /** Expert: Index the tokens produced by running the
      * field's value through an Analyzer, and also
      * separately disable the storing of norms. See
@@ -159,29 +149,7 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
    * binary value is used. Exactly one of stringValue(),
    * readerValue(), and getBinaryValue() must be set. */
   public Reader readerValue() { return fieldsData instanceof Reader ? (Reader)fieldsData : null; }
 
-  /** The value of the field in Binary, or null. If null, the Reader value,
-   * or String value is used. Exactly one of stringValue(),
-   * readerValue(), and getBinaryValue() must be set.
-   * @deprecated This method must allocate a new byte[] if
-   * the {@link AbstractField#getBinaryOffset()} is non-zero
-   * or {@link AbstractField#getBinaryLength()} is not the
-   * full length of the byte[]. Please use {@link
-   * AbstractField#getBinaryValue()} instead, which simply
-   * returns the byte[].
-   */
-  public byte[] binaryValue() {
-    if (!isBinary)
-      return null;
-    final byte[] data = (byte[]) fieldsData;
-    if (binaryOffset == 0 && data.length == binaryLength)
-      return data; //Optimization
-
-    final byte[] ret = new byte[binaryLength];
-    System.arraycopy(data, binaryOffset, ret, 0, binaryLength);
-    return ret;
-  }
-
   /** The TokesStream for this field to be used when indexing, or null. If null, the Reader value
    * or String value is analyzed to produce the indexed tokens. */
   public TokenStream tokenStreamValue() { return tokenStream; }
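The removed javadoc spells out the semantic difference: the deprecated binaryValue() had to copy whenever a non-zero binary offset or a short binary length was in play, while getBinaryValue() simply returns the backing byte[] and leaves the caller to honor the offset and length. A minimal sketch of the new contract, assuming a binary Fieldable named f:

    import org.apache.lucene.document.Fieldable;

    public class BinaryValueSketch {
      public static int sumBytes(Fieldable f) {
        byte[] data = f.getBinaryValue(); // backing array, no copy
        int off = f.getBinaryOffset();    // may be non-zero
        int len = f.getBinaryLength();    // may be shorter than data.length
        int sum = 0;
        for (int i = off; i < off + len; i++) {
          sum += data[i];
        }
        return sum;
      }
    }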
@@ -236,22 +204,8 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
     binaryOffset = offset;
   }
 
-
-  /** Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>.
-   * @deprecated use {@link #setTokenStream} */
-  public void setValue(TokenStream value) {
-    if (isBinary) {
-      throw new IllegalArgumentException("cannot set a TokenStream value on a binary field");
-    }
-    if (isStored) {
-      throw new IllegalArgumentException("cannot set a TokenStream value on a stored field");
-    }
-    fieldsData = null;
-    tokenStream = value;
-  }
-
   /** Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
-   * May be combined with stored values from stringValue() or binaryValue() */
+   * May be combined with stored values from stringValue() or getBinaryValue() */
   public void setTokenStream(TokenStream tokenStream) {
     this.isIndexed = true;
     this.isTokenized = true;
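The replacement for the removed setValue(TokenStream) is setTokenStream, which marks the field indexed and tokenized and, per the corrected javadoc, may be combined with a stored value. A minimal sketch (analyzer choice and field name are assumptions for illustration):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Field;

    public class TokenStreamSketch {
      public static Field contentField(String text) {
        TokenStream ts = new WhitespaceAnalyzer().tokenStream("content", new StringReader(text));
        Field f = new Field("content", text, Field.Store.YES, Field.Index.ANALYZED);
        f.setTokenStream(ts); // formerly the deprecated setValue(TokenStream)
        return f;
      }
    }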
@@ -18,6 +18,8 @@ package org.apache.lucene.document;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.FieldInvertState; // for javadocs
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.spans.SpanQuery;
 
 import java.io.Reader;
 import java.io.Serializable;
@@ -77,7 +79,7 @@ public interface Fieldable extends Serializable {
   /** The value of the field as a String, or null.
    * <p>
    * For indexing, if isStored()==true, the stringValue() will be used as the stored field value
-   * unless isBinary()==true, in which case binaryValue() will be used.
+   * unless isBinary()==true, in which case getBinaryValue() will be used.
    *
    * If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
    * If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
@@ -90,11 +92,6 @@ public interface Fieldable extends Serializable {
    */
   public Reader readerValue();
 
-  /** The value of the field in Binary, or null.
-   * @see #stringValue()
-   */
-  public byte[] binaryValue();
-
   /** The TokenStream for this field to be used when indexing, or null.
    * @see #stringValue()
    */
@@ -147,15 +144,9 @@ public interface Fieldable extends Serializable {
    */
   void setOmitNorms(boolean omitNorms);
 
-  /** @deprecated Renamed to {@link AbstractField#setOmitTermFreqAndPositions} */
-  void setOmitTf(boolean omitTf);
-
-  /** @deprecated Renamed to {@link AbstractField#getOmitTermFreqAndPositions} */
-  boolean getOmitTf();
-
   /**
    * Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
-   * it's values via {@link #stringValue()} or {@link #binaryValue()} is only valid as long as the {@link org.apache.lucene.index.IndexReader} that
+   * it's values via {@link #stringValue()} or {@link #getBinaryValue()} is only valid as long as the {@link org.apache.lucene.index.IndexReader} that
    * retrieved the {@link Document} is still open.
    *
    * @return true if this field can be loaded lazily
@@ -193,7 +184,7 @@ public interface Fieldable extends Serializable {
    * About reuse: if you pass in the result byte[] and it is
    * used, likely the underlying implementation will hold
    * onto this byte[] and return it in future calls to
-   * {@link #binaryValue()} or {@link #getBinaryValue()}.
+   * {@link #getBinaryValue()}.
    * So if you subsequently re-use the same byte[] elsewhere
    * it will alter this Fieldable's value.
    * @param result User defined buffer that will be used if
@@ -202,4 +193,20 @@ public interface Fieldable extends Serializable {
    * @return reference to the Field value as byte[].
    */
   abstract byte[] getBinaryValue(byte[] result);
+
+  /** @see #setOmitTermFreqAndPositions */
+  boolean getOmitTermFreqAndPositions();
+
+  /** Expert:
+   *
+   * If set, omit term freq, positions and payloads from
+   * postings for this field.
+   *
+   * <p><b>NOTE</b>: While this option reduces storage space
+   * required in the index, it also means any query
+   * requiring positional information, such as {@link
+   * PhraseQuery} or {@link SpanQuery} subclasses will
+   * silently fail to find results.
+   */
+  void setOmitTermFreqAndPositions(boolean omitTermFreqAndPositions);
 }
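The NOTE hoisted into Fieldable is worth making concrete: omitting term frequencies and positions saves index space, but positional queries against that field silently match nothing. A minimal sketch of the trade-off (field name and terms illustrative):

    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.PhraseQuery;
    import org.apache.lucene.search.TermQuery;

    public class OmitPositionsSketch {
      public static Field tagField() {
        Field tags = new Field("tag", "red green blue", Field.Store.NO, Field.Index.ANALYZED);
        tags.setOmitTermFreqAndPositions(true);
        return tags;
      }

      public static void queries() {
        // fine: a single-term query needs no positions
        TermQuery ok = new TermQuery(new Term("tag", "red"));
        // silently finds nothing against the field above: phrases need positions
        PhraseQuery broken = new PhraseQuery();
        broken.add(new Term("tag", "red"));
        broken.add(new Term("tag", "green"));
      }
    }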
@@ -206,11 +206,6 @@ public final class NumericField extends AbstractField {
     return isIndexed() ? tokenStream : null;
   }
 
-  /** Returns always <code>null</code> for numeric fields */
-  public byte[] binaryValue() {
-    return null;
-  }
-
   /** Returns always <code>null</code> for numeric fields */
   @Override
   public byte[] getBinaryValue(byte[] result){
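NumericField never carries a binary value, hence the unconditional nulls above; only the byte[]-returning accessor survives the removal. For indexing numbers, NumericField is the trunk replacement for hand-rolled prefix coding; a minimal sketch (field name illustrative):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.NumericField;

    public class NumericFieldSketch {
      public static Document priceDoc(double price) {
        Document doc = new Document();
        // stored, and indexed as a trie-encoded numeric value
        doc.add(new NumericField("price", Field.Store.YES, true).setDoubleValue(price));
        return doc;
      }
    }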
@@ -190,7 +190,7 @@ final class DocFieldProcessorPerThread extends DocConsumerPerThread {
         // easily add it
         FieldInfo fi = fieldInfos.add(fieldName, field.isIndexed(), field.isTermVectorStored(),
                                       field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(),
-                                      field.getOmitNorms(), false, field.getOmitTf());
+                                      field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
 
         fp = new DocFieldProcessorPerField(this, fi);
         fp.next = fieldHash[hashPos];
@@ -202,7 +202,7 @@ final class DocFieldProcessorPerThread extends DocConsumerPerThread {
       } else
         fp.fieldInfo.update(field.isIndexed(), field.isTermVectorStored(),
                             field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(),
-                            field.getOmitNorms(), false, field.getOmitTf());
+                            field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
 
       if (thisFieldGen != fp.lastGen) {
 
@@ -116,7 +116,7 @@ final class FieldInfos {
     while (fieldIterator.hasNext()) {
       Fieldable field = (Fieldable) fieldIterator.next();
       add(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(),
-          field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTf());
+          field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
     }
   }
 
@@ -425,16 +425,9 @@ final class FieldsReader implements Cloneable {
       return localFieldsStream;
     }
 
-    /** The value of the field in Binary, or null. If null, the Reader value,
-     * String value, or TokenStream value is used. Exactly one of stringValue(),
-     * readerValue(), binaryValue(), and tokenStreamValue() must be set. */
-    public byte[] binaryValue() {
-      return getBinaryValue(null);
-    }
-
     /** The value of the field as a Reader, or null. If null, the String value,
      * binary value, or TokenStream value is used. Exactly one of stringValue(),
-     * readerValue(), binaryValue(), and tokenStreamValue() must be set. */
+     * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
     public Reader readerValue() {
       ensureOpen();
       return null;
@@ -442,7 +435,7 @@ final class FieldsReader implements Cloneable {
 
     /** The value of the field as a TokenStream, or null. If null, the Reader value,
      * String value, or binary value is used. Exactly one of stringValue(),
-     * readerValue(), binaryValue(), and tokenStreamValue() must be set. */
+     * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
     public TokenStream tokenStreamValue() {
       ensureOpen();
       return null;
@@ -450,7 +443,7 @@ final class FieldsReader implements Cloneable {
 
     /** The value of the field as a String, or null. If null, the Reader value,
      * binary value, or TokenStream value is used. Exactly one of stringValue(),
-     * readerValue(), binaryValue(), and tokenStreamValue() must be set. */
+     * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
     public String stringValue() {
       ensureOpen();
       if (isBinary)
@@ -169,7 +169,7 @@ class DocHelper {
       if (f.isStored()) add(stored,f);
       else add(unstored,f);
       if (f.getOmitNorms()) add(noNorms,f);
-      if (f.getOmitTf()) add(noTf,f);
+      if (f.getOmitTermFreqAndPositions()) add(noTf,f);
       if (f.isLazy()) add(lazy, f);
     }
   }
@@ -72,7 +72,7 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue(field.isStoreOffsetWithTermVector() == true);
     assertTrue(field.isStorePositionWithTermVector() == true);
     assertTrue(field.getOmitNorms() == false);
-    assertTrue(field.getOmitTf() == false);
+    assertTrue(field.getOmitTermFreqAndPositions() == false);
 
     field = doc.getField(DocHelper.TEXT_FIELD_3_KEY);
     assertTrue(field != null);
@@ -80,7 +80,7 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue(field.isStoreOffsetWithTermVector() == false);
     assertTrue(field.isStorePositionWithTermVector() == false);
     assertTrue(field.getOmitNorms() == true);
-    assertTrue(field.getOmitTf() == false);
+    assertTrue(field.getOmitTermFreqAndPositions() == false);
 
     field = doc.getField(DocHelper.NO_TF_KEY);
     assertTrue(field != null);
@@ -88,7 +88,7 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue(field.isStoreOffsetWithTermVector() == false);
     assertTrue(field.isStorePositionWithTermVector() == false);
     assertTrue(field.getOmitNorms() == false);
-    assertTrue(field.getOmitTf() == true);
+    assertTrue(field.getOmitTermFreqAndPositions() == true);
     reader.close();
   }
 
@@ -134,7 +134,7 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue("field is null and it shouldn't be", field != null);
     assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null);
 
-    byte [] bytes = field.binaryValue();
+    byte [] bytes = field.getBinaryValue();
     assertTrue("bytes is null and it shouldn't be", bytes != null);
     assertTrue("", DocHelper.LAZY_FIELD_BINARY_BYTES.length == bytes.length);
     for (int i = 0; i < bytes.length; i++) {
@@ -286,9 +286,9 @@ public class TestFieldsReader extends LuceneTestCase {
     assertTrue(f1.isBinary());
     assertTrue(!f3.isBinary());
     assertTrue(fb.isBinary());
-    assertSizeEquals(2*DocHelper.FIELD_1_TEXT.length(), f1.binaryValue());
+    assertSizeEquals(2*DocHelper.FIELD_1_TEXT.length(), f1.getBinaryValue());
     assertEquals(DocHelper.FIELD_3_TEXT, f3.stringValue());
-    assertSizeEquals(DocHelper.LAZY_FIELD_BINARY_BYTES.length, fb.binaryValue());
+    assertSizeEquals(DocHelper.LAZY_FIELD_BINARY_BYTES.length, fb.getBinaryValue());
 
     reader.close();
   }
@@ -467,7 +467,7 @@ public class TestQueryParser extends LocalizedTestCase {
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content","\u0633\u0627\u0628",
-                      Field.Store.YES, Field.Index.UN_TOKENIZED));
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
     iw.addDocument(doc);
     iw.close();
     IndexSearcher is = new IndexSearcher(ramDir, true);
@@ -660,9 +660,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
     for (int docnum = 0 ; docnum < words.length ; ++docnum) {
       Document doc = new Document();
       doc.add(new Field("content", words[docnum],
-                        Field.Store.YES, Field.Index.UN_TOKENIZED));
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
       doc.add(new Field("body", "body",
-                        Field.Store.YES, Field.Index.UN_TOKENIZED));
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     writer.optimize();
@@ -343,9 +343,9 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content","\u0633\u0627\u0628",
-                      Field.Store.YES, Field.Index.UN_TOKENIZED));
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
     doc.add(new Field("body", "body",
-                      Field.Store.YES, Field.Index.UN_TOKENIZED));
+                      Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
 
     writer.optimize();
@@ -387,9 +387,9 @@ public class TestTermRangeFilter extends BaseTestRangeFilter {
     for (int docnum = 0 ; docnum < words.length ; ++docnum) {
       Document doc = new Document();
       doc.add(new Field("content", words[docnum],
-                        Field.Store.YES, Field.Index.UN_TOKENIZED));
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
       doc.add(new Field("body", "body",
-                        Field.Store.YES, Field.Index.UN_TOKENIZED));
+                        Field.Store.YES, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     writer.optimize();
@@ -421,8 +421,8 @@ public class TestSpans extends LuceneTestCase {
   // LUCENE-1404
   private void addDoc(IndexWriter writer, String id, String text) throws IOException {
     final Document doc = new Document();
-    doc.add( new Field("id", id, Field.Store.YES, Field.Index.UN_TOKENIZED) );
-    doc.add( new Field("text", text, Field.Store.YES, Field.Index.TOKENIZED) );
+    doc.add( new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED) );
+    doc.add( new Field("text", text, Field.Store.YES, Field.Index.ANALYZED) );
     writer.addDocument(doc);
   }
 