mirror of https://github.com/apache/lucene.git
LUCENE-3970: rename getUniqueTerm/FieldCount() to size()
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1312037 13f79535-47bb-0310-9956-ffa450edef68
parent 610d93357c
commit 5bae28d57e
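LUCENE-3970 folds the two "unique count" accessors into size(): Fields.getUniqueFieldCount() becomes Fields.size(), and Terms.getUniqueTermCount() becomes Terms.size(). From the caller's side the change looks like the sketch below; this is illustrative only, assuming the trunk APIs visible in this diff, with `fields` standing for any Fields instance (for example the term vectors returned by reader.getTermVectors(docID)):

    // before: fields.getUniqueFieldCount() / terms.getUniqueTermCount()
    int fieldCount = fields.size();      // -1 if the codec doesn't store this measure
    Terms terms = fields.terms("body");
    if (terms != null) {
      long termCount = terms.size();     // -1 if the codec doesn't store this measure
    }

Both methods keep the old contract of returning -1 when the codec does not record the measure, as the Fields and Terms javadoc hunks below spell out.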
@@ -259,6 +259,9 @@ Changes in backwards compatibility policy

 * LUCENE-2000: clone() now returns covariant types where possible. (ryan)

+* LUCENE-3970: Rename Fields.getUniqueFieldCount -> .size() and
+  Terms.getUniqueTermCount -> .size(). (Iulius Curt via Mike McCandless)
+
 Changes in Runtime Behavior

 * LUCENE-2846: omitNorms now behaves like omitTermFrequencyAndPositions, if you

@@ -759,7 +759,7 @@ public class MemoryIndex {
     }

     @Override
-    public long getUniqueTermCount() {
+    public long size() {
       return info.sortedTerms.length;
     }

@@ -785,7 +785,7 @@ public class MemoryIndex {
     }

     @Override
-    public int getUniqueFieldCount() {
+    public int size() {
       return sortedFields.length;
     }
   }

@@ -202,7 +202,7 @@ public class BlockTermsReader extends FieldsProducer {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return fields.size();
    }

@@ -261,7 +261,7 @@ public class BlockTermsReader extends FieldsProducer {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return numTerms;
    }

@@ -216,7 +216,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return fields.size();
    }

@@ -455,7 +455,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return numTerms;
    }

@@ -166,17 +166,17 @@ public abstract class TermVectorsWriter implements Closeable {
   /** Safe (but, slowish) default method to write every
    *  vector field in the document. This default
    *  implementation requires that the vectors implement
-   *  both Fields.getUniqueFieldCount and
-   *  Terms.getUniqueTermCount. */
+   *  both Fields.size and
+   *  Terms.size. */
   protected final void addAllDocVectors(Fields vectors, FieldInfos fieldInfos) throws IOException {
     if (vectors == null) {
       startDocument(0);
       return;
     }

-    final int numFields = vectors.getUniqueFieldCount();
+    final int numFields = vectors.size();
     if (numFields == -1) {
-      throw new IllegalStateException("vectors.getUniqueFieldCount() must be implemented (it returned -1)");
+      throw new IllegalStateException("vectors.size() must be implemented (it returned -1)");
     }
     startDocument(numFields);

@@ -195,9 +195,9 @@ public abstract class TermVectorsWriter implements Closeable {
        // FieldsEnum shouldn't lie...
        continue;
      }
-     final int numTerms = (int) terms.getUniqueTermCount();
+     final int numTerms = (int) terms.size();
      if (numTerms == -1) {
-       throw new IllegalStateException("vector.getUniqueTermCount() must be implemented (it returned -1)");
+       throw new IllegalStateException("terms.size() must be implemented (it returned -1)");
      }
      final TermsEnum termsEnum = terms.iterator(null);

@@ -161,7 +161,7 @@ class Lucene3xFields extends FieldsProducer {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return preTerms.size();
    }

@@ -246,7 +246,7 @@ class Lucene3xFields extends FieldsProducer {
    }

    @Override
-   public long getUniqueTermCount() throws IOException {
+   public long size() throws IOException {
      return -1;
    }

@@ -270,7 +270,7 @@ class Lucene3xTermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      if (fieldNumbers == null) {
        return 0;
      } else {

@@ -307,7 +307,7 @@ class Lucene3xTermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return numTerms;
    }

@@ -660,7 +660,7 @@ class Lucene3xTermVectorsReader extends TermVectorsReader {
    }
    if (tvx != null) {
      Fields fields = new TVFields(docID);
-     if (fields.getUniqueFieldCount() == 0) {
+     if (fields.size() == 0) {
        // TODO: we can improve writer here, eg write 0 into
        // tvx file, so we know on first read from tvx that
        // this doc has no TVs

@@ -300,7 +300,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      if (fieldNumbers == null) {
        return 0;
      } else {

@@ -335,7 +335,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return numTerms;
    }

@@ -674,7 +674,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
    }
    if (tvx != null) {
      Fields fields = new TVFields(docID);
-     if (fields.getUniqueFieldCount() == 0) {
+     if (fields.size() == 0) {
        // TODO: we can improve writer here, eg write 0 into
        // tvx file, so we know on first read from tvx that
        // this doc has no TVs

@@ -824,7 +824,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
    }

    @Override
-   public long getUniqueTermCount() throws IOException {
+   public long size() throws IOException {
      return termCount;
    }

@@ -888,7 +888,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return fields.size();
    }

@@ -248,7 +248,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return fields.size();
    }

@@ -596,7 +596,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return (long) termCount;
    }

@@ -641,7 +641,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return -1;
    }

@@ -257,7 +257,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public int getUniqueFieldCount() throws IOException {
+   public int size() throws IOException {
      return fields.size();
    }
  }

@@ -281,7 +281,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
    }

    @Override
-   public long getUniqueTermCount() throws IOException {
+   public long size() throws IOException {
      return terms.size();
    }

@@ -1051,7 +1051,7 @@ public class CheckIndex {
      long termCount = -1;

      if (status.termCount-termCountStart > 0) {
-       termCount = fields.terms(field).getUniqueTermCount();
+       termCount = fields.terms(field).size();

        if (termCount != -1 && termCount != status.termCount - termCountStart) {
          throw new RuntimeException("termCount mismatch " + termCount + " vs " + (status.termCount - termCountStart));

@@ -1104,7 +1104,7 @@ public class CheckIndex {
        }
      }

-     int fieldCount = fields.getUniqueFieldCount();
+     int fieldCount = fields.size();

      if (fieldCount != -1) {
        if (fieldCount < 0) {

@@ -36,7 +36,7 @@ public abstract class Fields {
   *  measure isn't stored by the codec. Note that, just like
   *  other term measures, this measure does not take deleted
   *  documents into account. */
- public abstract int getUniqueFieldCount() throws IOException;
+ public abstract int size() throws IOException;

  /** Returns the number of terms for all fields, or -1 if this
   *  measure isn't stored by the codec. Note that, just like

@@ -53,7 +53,7 @@ public abstract class Fields {
    }
    Terms terms = terms(field);
    if (terms != null) {
-     final long termCount = terms.getUniqueTermCount();
+     final long termCount = terms.size();
      if (termCount == -1) {
        return -1;
      }

@@ -56,8 +56,8 @@ public class FilterAtomicReader extends AtomicReader {
    }

    @Override
-   public int getUniqueFieldCount() throws IOException {
-     return in.getUniqueFieldCount();
+   public int size() throws IOException {
+     return in.size();
    }

    @Override

@@ -86,8 +86,8 @@ public class FilterAtomicReader extends AtomicReader {
    }

    @Override
-   public long getUniqueTermCount() throws IOException {
-     return in.getUniqueTermCount();
+   public long size() throws IOException {
+     return in.size();
    }

    @Override

@@ -228,7 +228,7 @@ public final class MultiFields extends Fields {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return -1;
    }

@@ -96,7 +96,7 @@ public final class MultiTerms extends Terms {
    }

    @Override
-   public long getUniqueTermCount() throws IOException {
+   public long size() throws IOException {
      return -1;
    }

@@ -197,7 +197,7 @@ public final class ParallelAtomicReader extends AtomicReader {
    }

    @Override
-   public int getUniqueFieldCount() throws IOException {
+   public int size() throws IOException {
      return fields.size();
    }
  }

@@ -81,7 +81,7 @@ public abstract class Terms {
   *  measure isn't stored by the codec. Note that, just like
   *  other term measures, this measure does not take deleted
   *  documents into account. */
- public abstract long getUniqueTermCount() throws IOException;
+ public abstract long size() throws IOException;

  /** Returns the sum of {@link TermsEnum#totalTermFreq} for
   *  all terms in this field, or -1 if this measure isn't

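Since Terms.size() may still return -1, callers needing an exact count must fall back to walking the terms; FieldFilterAtomicReader below does the analogous walk for fields with a FieldsEnum. A minimal sketch of the term-side fallback, assuming only APIs that appear in this diff (Terms.size(), Terms.iterator(TermsEnum), TermsEnum.next()); the helper name is hypothetical:

    static long countTerms(Terms terms) throws IOException {
      long size = terms.size();                    // formerly getUniqueTermCount()
      if (size != -1) {
        return size;                               // codec stored the measure
      }
      long count = 0;
      TermsEnum termsEnum = terms.iterator(null);  // null: no prior enum to reuse
      while (termsEnum.next() != null) {
        count++;                                   // count the slow way
      }
      return count;
    }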
@@ -1114,7 +1114,7 @@ class FieldCacheImpl implements FieldCache {
        // is fine -- GrowableWriter will reallocate as needed
        long numUniqueTerms = 0;
        try {
-         numUniqueTerms = terms.getUniqueTermCount();
+         numUniqueTerms = terms.size();
        } catch (UnsupportedOperationException uoe) {
          numUniqueTerms = -1;
        }

@@ -1165,7 +1165,7 @@ class FieldCacheImpl implements FieldCache {
          if (termOrd == termOrdToBytesOffset.size()) {
            // NOTE: this code only runs if the incoming
            // reader impl doesn't implement
-           // getUniqueTermCount (which should be uncommon)
+           // size (which should be uncommon)
            termOrdToBytesOffset = termOrdToBytesOffset.resize(ArrayUtil.oversize(1+termOrd, 1));
          }
          termOrdToBytesOffset.set(termOrd, bytes.copyUsingLengthPrefix(term));

@@ -1252,7 +1252,7 @@ class FieldCacheImpl implements FieldCache {
        // is fine -- GrowableWriter will reallocate as needed
        long numUniqueTerms = 0;
        try {
-         numUniqueTerms = terms.getUniqueTermCount();
+         numUniqueTerms = terms.size();
        } catch (UnsupportedOperationException uoe) {
          numUniqueTerms = -1;
        }

@@ -21,7 +21,6 @@ import java.util.Random;

 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene40.Lucene40PostingsFormat;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsEnum;

@@ -63,7 +62,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
          enums.put(docs, true);
        }

-       assertEquals(terms.getUniqueTermCount(), enums.size());
+       assertEquals(terms.size(), enums.size());
      }
    }.run();
    IOUtils.close(writer, open, dir);

@@ -100,7 +99,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
        docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random.nextBoolean());
        enums.put(docs, true);
      }
-     assertEquals(terms.getUniqueTermCount(), enums.size());
+     assertEquals(terms.size(), enums.size());

      enums.clear();
      iterator = terms.iterator(null);

@@ -141,7 +140,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
        docs = iterator.docs(null, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean());
        enums.put(docs, true);
      }
-     assertEquals(terms.getUniqueTermCount(), enums.size());
+     assertEquals(terms.size(), enums.size());

      iterator = terms.iterator(null);
      enums.clear();

@@ -150,7 +149,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
        docs = iterator.docs(bits, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean());
        enums.put(docs, true);
      }
-     assertEquals(terms.getUniqueTermCount(), enums.size());
+     assertEquals(terms.size(), enums.size());
    }
    IOUtils.close(writer, firstReader, secondReader, dir);
  }

@@ -21,7 +21,6 @@ import java.io.StringReader;

 import org.apache.lucene.analysis.EmptyTokenizer;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Fields;

@@ -345,7 +344,7 @@ public class TestDocument extends LuceneTestCase {
    Fields tvFields = r.getTermVectors(0);
    Terms tvs = tvFields.terms(field);
    assertNotNull(tvs);
-   assertEquals(2, tvs.getUniqueTermCount());
+   assertEquals(2, tvs.size());
    TermsEnum tvsEnum = tvs.iterator(null);
    assertEquals(new BytesRef("abc"), tvsEnum.next());
    final DocsAndPositionsEnum dpEnum = tvsEnum.docsAndPositions(null, null, false);

@@ -290,11 +290,11 @@ public class TestDocumentWriter extends LuceneTestCase {
    // f1
    Terms tfv1 = reader.getTermVectors(0).terms("f1");
    assertNotNull(tfv1);
-   assertEquals("the 'with_tv' setting should rule!",2,tfv1.getUniqueTermCount());
+   assertEquals("the 'with_tv' setting should rule!",2,tfv1.size());
    // f2
    Terms tfv2 = reader.getTermVectors(0).terms("f2");
    assertNotNull(tfv2);
-   assertEquals("the 'with_tv' setting should rule!",2,tfv2.getUniqueTermCount());
+   assertEquals("the 'with_tv' setting should rule!",2,tfv2.size());
    reader.close();
  }

@@ -184,8 +184,8 @@ public class TestDuelingCodecs extends LuceneTestCase {
   *  checks that top-level statistics on Fields are the same
   */
  public void assertFieldStatistics(Fields leftFields, Fields rightFields) throws Exception {
-   if (leftFields.getUniqueFieldCount() != -1 && rightFields.getUniqueFieldCount() != -1) {
-     assertEquals(info, leftFields.getUniqueFieldCount(), rightFields.getUniqueFieldCount());
+   if (leftFields.size() != -1 && rightFields.size() != -1) {
+     assertEquals(info, leftFields.size(), rightFields.size());
    }

    if (leftFields.getUniqueTermCount() != -1 && rightFields.getUniqueTermCount() != -1) {

@@ -238,8 +238,8 @@ public class TestDuelingCodecs extends LuceneTestCase {
    if (leftTerms.getSumTotalTermFreq() != -1 && rightTerms.getSumTotalTermFreq() != -1) {
      assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
    }
-   if (leftTerms.getUniqueTermCount() != -1 && rightTerms.getUniqueTermCount() != -1) {
-     assertEquals(info, leftTerms.getUniqueTermCount(), rightTerms.getUniqueTermCount());
+   if (leftTerms.size() != -1 && rightTerms.size() != -1) {
+     assertEquals(info, leftTerms.size(), rightTerms.size());
    }
  }

@@ -19,10 +19,8 @@ package org.apache.lucene.index;

 import java.io.IOException;

-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;

@@ -120,7 +118,7 @@ public class TestSegmentMerger extends LuceneTestCase {

    Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
    assertNotNull(vector);
-   assertEquals(3, vector.getUniqueTermCount());
+   assertEquals(3, vector.size());
    TermsEnum termsEnum = vector.iterator(null);

    int i = 0;

@@ -193,7 +193,7 @@ public class TestSegmentReader extends LuceneTestCase {
  public void testTermVectors() throws IOException {
    Terms result = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
    assertNotNull(result);
-   assertEquals(3, result.getUniqueTermCount());
+   assertEquals(3, result.size());
    TermsEnum termsEnum = result.iterator(null);
    while(termsEnum.next() != null) {
      String term = termsEnum.term().utf8ToString();

@@ -204,6 +204,6 @@ public class TestSegmentReader extends LuceneTestCase {

    Fields results = reader.getTermVectors(0);
    assertTrue(results != null);
-   assertEquals("We do not have 3 term freq vectors", 3, results.getUniqueFieldCount());
+   assertEquals("We do not have 3 term freq vectors", 3, results.size());
  }
}

@@ -600,7 +600,7 @@ public class TestStressIndexing2 extends LuceneTestCase {

  public static void verifyEquals(Fields d1, Fields d2) throws IOException {
    if (d1 == null) {
-     assertTrue(d2 == null || d2.getUniqueFieldCount() == 0);
+     assertTrue(d2 == null || d2.size() == 0);
      return;
    }
    assertTrue(d2 != null);

@@ -204,7 +204,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
    for (int j = 0; j < 5; j++) {
      Terms vector = reader.get(j).terms(testFields[0]);
      assertNotNull(vector);
-     assertEquals(testTerms.length, vector.getUniqueTermCount());
+     assertEquals(testTerms.length, vector.size());
      TermsEnum termsEnum = vector.iterator(null);
      for (int i = 0; i < testTerms.length; i++) {
        final BytesRef text = termsEnum.next();

@@ -223,7 +223,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
    for (int j = 0; j < 5; j++) {
      Terms vector = reader.get(j).terms(testFields[0]);
      assertNotNull(vector);
-     assertEquals(testTerms.length, vector.getUniqueTermCount());
+     assertEquals(testTerms.length, vector.size());
      TermsEnum termsEnum = vector.iterator(null);
      DocsEnum docsEnum = null;
      for (int i = 0; i < testTerms.length; i++) {

@@ -250,7 +250,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
    BytesRef[] terms;
    Terms vector = reader.get(0).terms(testFields[0]);
    assertNotNull(vector);
-   assertEquals(testTerms.length, vector.getUniqueTermCount());
+   assertEquals(testTerms.length, vector.size());
    TermsEnum termsEnum = vector.iterator(null);
    DocsAndPositionsEnum dpEnum = null;
    for (int i = 0; i < testTerms.length; i++) {

@@ -287,7 +287,7 @@ public class TestTermVectorsReader extends LuceneTestCase {

    Terms freqVector = reader.get(0).terms(testFields[1]); //no pos, no offset
    assertNotNull(freqVector);
-   assertEquals(testTerms.length, freqVector.getUniqueTermCount());
+   assertEquals(testTerms.length, freqVector.size());
    termsEnum = freqVector.iterator(null);
    assertNotNull(termsEnum);
    for (int i = 0; i < testTerms.length; i++) {

@@ -306,7 +306,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
    assertNotNull(vector);
    TermsEnum termsEnum = vector.iterator(null);
    assertNotNull(termsEnum);
-   assertEquals(testTerms.length, vector.getUniqueTermCount());
+   assertEquals(testTerms.length, vector.size());
    DocsAndPositionsEnum dpEnum = null;
    for (int i = 0; i < testTerms.length; i++) {
      final BytesRef text = termsEnum.next();

@@ -119,7 +119,7 @@ public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod
      }

      @Override
-     public long getUniqueTermCount() throws IOException {
+     public long size() throws IOException {
        return -1;
      }
    });

@@ -98,7 +98,7 @@ public class TestTermVectors extends LuceneTestCase {
    for (int i = 0; i < hits.length; i++) {
      Fields vectors = searcher.reader.getTermVectors(hits[i].doc);
      assertNotNull(vectors);
-     assertEquals("doc=" + hits[i].doc + " tv=" + vectors, 1, vectors.getUniqueFieldCount());
+     assertEquals("doc=" + hits[i].doc + " tv=" + vectors, 1, vectors.size());
    }
    Terms vector;
    vector = searcher.reader.getTermVectors(hits[0].doc).terms("noTV");

@@ -121,13 +121,13 @@ public class TestTermVectors extends LuceneTestCase {
    IndexReader reader = writer.getReader();
    writer.close();
    Fields v = reader.getTermVectors(0);
-   assertEquals(4, v.getUniqueFieldCount());
+   assertEquals(4, v.size());
    String[] expectedFields = new String[]{"a", "b", "c", "x"};
    int[] expectedPositions = new int[]{1, 2, 0};
    FieldsEnum fieldsEnum = v.iterator();
    for(int i=0;i<expectedFields.length;i++) {
      assertEquals(expectedFields[i], fieldsEnum.next());
-     assertEquals(3, v.terms(expectedFields[i]).getUniqueTermCount());
+     assertEquals(3, v.terms(expectedFields[i]).size());

      DocsAndPositionsEnum dpEnum = null;
      Terms terms = fieldsEnum.terms();

@@ -166,7 +166,7 @@ public class TestTermVectors extends LuceneTestCase {
    for (int i = 0; i < hits.length; i++) {
      Fields vectors = searcher.reader.getTermVectors(hits[i].doc);
      assertNotNull(vectors);
-     assertEquals(1, vectors.getUniqueFieldCount());
+     assertEquals(1, vectors.size());

      TermsEnum termsEnum = vectors.terms("field").iterator(null);
      assertNotNull(termsEnum.next());

@@ -205,7 +205,7 @@ public class TestTermVectors extends LuceneTestCase {
    for (int i = 0; i < hits.length; i++) {
      Fields vectors = searcher.reader.getTermVectors(hits[i].doc);
      assertNotNull(vectors);
-     assertEquals(1, vectors.getUniqueFieldCount());
+     assertEquals(1, vectors.size());
    }
  }

@@ -303,7 +303,7 @@ public class TestTermVectors extends LuceneTestCase {
    Terms vector = knownSearcher.reader.getTermVectors(hits[1].doc).terms("field");
    assertNotNull(vector);
    //System.out.println("Vector: " + vector);
-   assertEquals(10, vector.getUniqueTermCount());
+   assertEquals(10, vector.size());
    TermsEnum termsEnum = vector.iterator(null);
    while(termsEnum.next() != null) {
      String term = termsEnum.term().utf8ToString();

@@ -371,7 +371,7 @@ public class TestTermVectors extends LuceneTestCase {

      Fields vectors = searcher.reader.getTermVectors(hits[i].doc);
      assertNotNull(vectors);
-     assertEquals(1, vectors.getUniqueFieldCount());
+     assertEquals(1, vectors.size());
    }
    reader.close();
  }

@@ -418,10 +418,10 @@ public class TestTermVectors extends LuceneTestCase {

    Fields vectors = searcher.reader.getTermVectors(hits[0].doc);
    assertNotNull(vectors);
-   assertEquals(1, vectors.getUniqueFieldCount());
+   assertEquals(1, vectors.size());
    Terms vector = vectors.terms("field");
    assertNotNull(vector);
-   assertEquals(1, vector.getUniqueTermCount());
+   assertEquals(1, vector.size());
    TermsEnum termsEnum = vector.iterator(null);
    assertNotNull(termsEnum.next());
    assertEquals("one", termsEnum.term().utf8ToString());

@@ -109,7 +109,7 @@ public class RAMOnlyPostingsFormat extends PostingsFormat {
    }

    @Override
-   public int getUniqueFieldCount() {
+   public int size() {
      return fieldToTerms.size();
    }

@@ -135,7 +135,7 @@ public class RAMOnlyPostingsFormat extends PostingsFormat {
    }

    @Override
-   public long getUniqueTermCount() {
+   public long size() {
      return termToDocs.size();
    }

@@ -130,7 +130,7 @@ public final class FieldFilterAtomicReader extends FilterAtomicReader {
    }

    @Override
-   public int getUniqueFieldCount() throws IOException {
+   public int size() throws IOException {
      // TODO: add faster implementation!
      int c = 0;
      final FieldsEnum it = iterator();

@@ -30,7 +30,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Terms;

@@ -106,7 +105,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {

    IndexReader r = IndexReader.open(dir);
    Terms vector = r.getTermVectors(0).terms("field");
-   assertEquals(1, vector.getUniqueTermCount());
+   assertEquals(1, vector.size());
    TermsEnum termsEnum = vector.iterator(null);
    termsEnum.next();
    assertEquals(2, termsEnum.totalTermFreq());

@@ -752,7 +752,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
    IndexReader reader = IndexReader.open(dir);
    Fields tfv = reader.getTermVectors(0);
    assertNotNull(tfv);
-   assertTrue(tfv.getUniqueFieldCount() > 0);
+   assertTrue(tfv.size() > 0);
    reader.close();
  }

@@ -595,7 +595,7 @@ public class LukeRequestHandler extends RequestHandlerBase
        if (freq > tiq.minFreq) {
          UnicodeUtil.UTF8toUTF16(text, spare);
          String t = spare.toString();
-         tiq.distinctTerms = new Long(terms.getUniqueTermCount()).intValue();
+         tiq.distinctTerms = new Long(terms.size()).intValue();

          tiq.add(new TopTermQueue.TermInfo(new Term(field, t), termsEnum.docFreq()));
          if (tiq.size() > numTerms) { // if tiq full