LUCENE-2858: Remove code duplication in docFreq (made final in AtomicReader and implemented via fields())

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2858@1237668 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2012-01-30 14:13:07 +00:00
parent 2338018bb0
commit 4091d801c1
4 changed files with 4 additions and 47 deletions
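
As the commit message says, docFreq(String, BytesRef) now has a single, final implementation in AtomicReader that resolves the count through fields(). The AtomicReader hunk below only shows the first lines of that body; the following is a minimal sketch of how such a consolidated docFreq can be built on the Fields -> Terms -> TermsEnum chain. The iterator(null) and seekExact(term, true) signatures are assumptions based on the 4.0-era trunk API, not taken from this diff.

    // Hedged sketch, not the literal committed body: resolve the term's
    // document frequency through the reader's postings view.
    @Override
    public final int docFreq(String field, BytesRef term) throws IOException {
      final Fields fields = fields();            // postings of this reader, may be null
      if (fields == null) {
        return 0;
      }
      final Terms terms = fields.terms(field);   // per-field terms, may be null
      if (terms == null) {
        return 0;
      }
      final TermsEnum termsEnum = terms.iterator(null);   // assumed reuse-style iterator
      if (termsEnum.seekExact(term, true)) {              // assumed seekExact(term, useCache)
        return termsEnum.docFreq();
      }
      return 0;
    }

With a base-class implementation like this, the per-reader docFreq overrides removed below become redundant as long as each reader exposes correct postings through fields().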

@@ -776,15 +776,6 @@ public class MemoryIndex {
     return fieldInfos;
   }
-  @Override
-  public int docFreq(String field, BytesRef term) {
-    Info info = getInfo(field);
-    int freq = 0;
-    if (info != null) freq = info.getPositions(term) != null ? 1 : 0;
-    if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + field + ":" + term + ", freq:" + freq);
-    return freq;
-  }
   private class MemoryFields extends Fields {
     @Override
     public FieldsEnum iterator() {

@@ -83,19 +83,11 @@ public abstract class AtomicReader extends IndexReader {
    * Returns {@link Fields} for this reader.
    * This method may return null if the reader has no
    * postings.
-   *
-   * <p><b>NOTE</b>: if this is a multi reader ({@link
-   * #getSequentialSubReaders} is not null) then this
-   * method will throw UnsupportedOperationException. If
-   * you really need a {@link Fields} for such a reader,
-   * use {@link MultiFields#getFields}. However, for
-   * performance reasons, it's best to get all sub-readers
-   * using {@link ReaderUtil#gatherSubReaders} and iterate
-   * through them yourself. */
+   */
   public abstract Fields fields() throws IOException;
   @Override
-  public int docFreq(String field, BytesRef term) throws IOException {
+  public final int docFreq(String field, BytesRef term) throws IOException {
     final Fields fields = fields();
     if (fields == null) {
       return 0;
@@ -225,12 +217,7 @@ public abstract class AtomicReader extends IndexReader {
   /** Returns the number of unique terms (across all fields)
    *  in this reader.
-   *
-   *  @return number of unique terms or -1 if this count
-   *  cannot be easily determined (eg Multi*Readers).
-   *  Instead, you should call {@link
-   *  #getSequentialSubReaders} and ask each sub reader for
-   *  its unique term count. */
+   */
   public final long getUniqueTermCount() throws IOException {
     final Fields fields = fields();
     if (fields == null) {
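
The getUniqueTermCount() hunk above is cut off after the null check. Below is a sketch of how the total could be aggregated per field through fields(), assuming the FieldsEnum API visible elsewhere in this diff (Fields.iterator(), FieldsEnum.next(), FieldsEnum.terms()) plus a Terms.getUniqueTermCount() accessor that returns -1 when a field cannot report its count; those last details are assumptions, not part of this diff.

    // Hedged sketch of the remainder of getUniqueTermCount():
    // sum the per-field counts, propagating -1 ("unknown") if any field reports it.
    long numTerms = 0;
    final FieldsEnum fieldsEnum = fields.iterator();
    while (fieldsEnum.next() != null) {
      final Terms terms = fieldsEnum.terms();
      if (terms == null) {
        continue;
      }
      final long termCount = terms.getUniqueTermCount();  // assumed per-field accessor
      if (termCount == -1) {
        return -1;
      }
      numTerms += termCount;
    }
    return numTerms;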
@@ -243,15 +230,7 @@ public abstract class AtomicReader extends IndexReader {
    * Returns {@link DocValues} for this field.
    * This method may return null if the reader has no per-document
    * values stored.
-   *
-   * <p><b>NOTE</b>: if this is a multi reader ({@link
-   * #getSequentialSubReaders} is not null) then this
-   * method will throw UnsupportedOperationException. If
-   * you really need {@link DocValues} for such a reader,
-   * use {@link MultiDocValues#getDocValues(IndexReader,String)}. However, for
-   * performance reasons, it's best to get all sub-readers
-   * using {@link ReaderUtil#gatherSubReaders} and iterate
-   * through them yourself. */
+   */
   public abstract DocValues docValues(String field) throws IOException;
   public abstract DocValues normValues(String field) throws IOException;

@@ -332,12 +332,6 @@ public class FilterIndexReader extends AtomicReader {
     return in.hasNorms(field);
   }
-  @Override
-  public int docFreq(String field, BytesRef t) throws IOException {
-    ensureOpen();
-    return in.docFreq(field, t);
-  }
   @Override
   protected void doClose() throws IOException {
     in.close();
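
FilterIndexReader can drop its docFreq delegation because the final AtomicReader.docFreq() now runs against whatever fields() returns, and a filter reader already forwards that call to the wrapped reader. A sketch of the delegation the base-class method relies on; the ensureOpen() guard mirrors the removed override and is an assumption here.

    // Hedged sketch of the fields() delegation that the final docFreq goes through.
    @Override
    public Fields fields() throws IOException {
      ensureOpen();          // same open-check the removed docFreq override performed
      return in.fields();    // postings of the wrapped reader
    }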

@@ -261,13 +261,6 @@ public class ParallelReader extends AtomicReader {
     return reader==null ? false : reader.hasNorms(field);
   }
-  @Override
-  public int docFreq(String field, BytesRef term) throws IOException {
-    ensureOpen();
-    AtomicReader reader = fieldToReader.get(field);
-    return reader == null? 0 : reader.docFreq(field, term);
-  }
   // for testing
   AtomicReader[] getSubReaders() {
     return readers.toArray(new AtomicReader[readers.size()]);
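
ParallelReader's removed override routed docFreq by field through fieldToReader. That routing now has to live inside its Fields implementation so the final base-class docFreq still reaches the sub-reader that indexed the field. A hypothetical sketch of such a per-field lookup; the enclosing Fields class and method placement are assumptions, not shown in this diff.

    // Hedged sketch: per-field routing inside ParallelReader's Fields view.
    @Override
    public Terms terms(String field) throws IOException {
      final AtomicReader reader = fieldToReader.get(field);
      if (reader == null) {
        return null;                       // no sub-reader indexed this field
      }
      final Fields fields = reader.fields();
      return fields == null ? null : fields.terms(field);
    }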