Upgrade to lucene-7.0.0-snapshot-ad2cb77. (elastic/x-pack-elasticsearch#1820)
Sibling of elastic/elasticsearch#25349. Original commit: elastic/x-pack-elasticsearch@2ab35a91e5
parent 2eb939842d
commit bd973aaadb
@@ -267,8 +267,8 @@ public final class FieldSubsetReader extends FilterLeafReader {
     }
 
     @Override
-    public Fields fields() throws IOException {
-        return new FieldFilterFields(super.fields());
+    public Terms terms(String field) throws IOException {
+        return wrapTerms(super.terms(field), field);
     }
 
     @Override
@@ -344,24 +344,27 @@ public final class FieldSubsetReader extends FilterLeafReader {
 
         @Override
         public Terms terms(String field) throws IOException {
-            if (!hasField(field)) {
-                return null;
-            } else if (FieldNamesFieldMapper.NAME.equals(field)) {
-                // for the _field_names field, fields for the document
-                // are encoded as postings, where term is the field.
-                // so we hide terms for fields we filter out.
-                Terms terms = super.terms(field);
-                if (terms != null) {
-                    // check for null, in case term dictionary is not a ghostbuster
-                    // So just because its in fieldinfos and "indexed=true" doesn't mean you can go grab a Terms for it.
-                    // It just means at one point there was a document with that field indexed...
-                    // The fields infos isn't updates/removed even if no docs refer to it
-                    terms = new FieldNamesTerms(terms);
-                }
-                return terms;
-            } else {
-                return super.terms(field);
-            }
+            return wrapTerms(super.terms(field), field);
         }
     }
+
+    private Terms wrapTerms(Terms terms, String field) {
+        if (!hasField(field)) {
+            return null;
+        } else if (FieldNamesFieldMapper.NAME.equals(field)) {
+            // for the _field_names field, fields for the document
+            // are encoded as postings, where term is the field.
+            // so we hide terms for fields we filter out.
+            if (terms != null) {
+                // check for null, in case term dictionary is not a ghostbuster
+                // So just because its in fieldinfos and "indexed=true" doesn't mean you can go grab a Terms for it.
+                // It just means at one point there was a document with that field indexed...
+                // The fields infos isn't updates/removed even if no docs refer to it
+                terms = new FieldNamesTerms(terms);
+            }
+            return terms;
+        } else {
+            return terms;
+        }
+    }
 
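Note: Lucene 7.0 removed LeafReader.fields(); per-field term access now goes through LeafReader.terms(String), which is why both the reader-level override and the Fields-based term-vector path above funnel into a single wrapTerms helper. Below is a minimal sketch of the same pattern on a plain FilterLeafReader; the FilteredTermsReader class and allowedFields set are hypothetical, not the x-pack implementation.

import java.io.IOException;
import java.util.Set;

import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;

public class FilteredTermsReader extends FilterLeafReader {
    private final Set<String> allowedFields;

    public FilteredTermsReader(LeafReader in, Set<String> allowedFields) {
        super(in);
        this.allowedFields = allowedFields;
    }

    @Override
    public Terms terms(String field) throws IOException {
        // Lucene 7 style: filter per field instead of wrapping a Fields instance
        return wrapTerms(super.terms(field), field);
    }

    private Terms wrapTerms(Terms terms, String field) {
        // hide postings for fields that are not visible through this reader
        return allowedFields.contains(field) ? terms : null;
    }

    @Override
    public CacheHelper getCoreCacheHelper() {
        // Lucene 7 leaves the cache helpers abstract; delegate to the wrapped reader
        return in.getCoreCacheHelper();
    }

    @Override
    public CacheHelper getReaderCacheHelper() {
        return in.getReaderCacheHelper();
    }
}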
@@ -100,14 +100,14 @@ public class DocumentSubsetReaderTests extends ESTestCase {
                 new TermQuery(new Term("field", "value1"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
-        assertThat(result.totalHits, equalTo(1));
+        assertThat(result.totalHits, equalTo(1L));
         assertThat(result.scoreDocs[0].doc, equalTo(0));
 
         indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
                 new TermQuery(new Term("field", "value2"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
-        assertThat(result.totalHits, equalTo(1));
+        assertThat(result.totalHits, equalTo(1L));
         assertThat(result.scoreDocs[0].doc, equalTo(1));
 
         // this doc has been marked as deleted:
@@ -115,13 +115,13 @@ public class DocumentSubsetReaderTests extends ESTestCase {
                 new TermQuery(new Term("field", "value3"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
-        assertThat(result.totalHits, equalTo(0));
+        assertThat(result.totalHits, equalTo(0L));
 
         indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
                 new TermQuery(new Term("field", "value4"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
-        assertThat(result.totalHits, equalTo(1));
+        assertThat(result.totalHits, equalTo(1L));
         assertThat(result.scoreDocs[0].doc, equalTo(3));
     }
 
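Note: the only change in these assertions follows from TopDocs.totalHits becoming a long in Lucene 7.0, so the expected values must be long literals (1L, 0L). A minimal sketch of the updated assertion style, assuming Hamcrest on the classpath; the assertHitCount helper is hypothetical.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TopDocs;

class TotalHitsExample {
    // Hypothetical helper: asserts that a searcher sees exactly `expected` live docs.
    static void assertHitCount(IndexSearcher searcher, long expected) throws IOException {
        TopDocs result = searcher.search(new MatchAllDocsQuery(), 1);
        // TopDocs.totalHits is a long as of Lucene 7.0, hence the long-typed expectation
        assertThat(result.totalHits, equalTo(expected));
    }
}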
@@ -20,6 +20,7 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexWriter;
@@ -87,8 +88,8 @@ public class FieldSubsetReaderTests extends ESTestCase {
         // see only one field
         LeafReader segmentReader = ir.leaves().get(0).reader();
         Set<String> seenFields = new HashSet<>();
-        for (String field : segmentReader.fields()) {
-            seenFields.add(field);
+        for (FieldInfo info : segmentReader.getFieldInfos()) {
+            seenFields.add(info.name);
         }
         assertEquals(Collections.singleton("fieldA"), seenFields);
         assertNotNull(segmentReader.terms("fieldA"));
@@ -937,13 +938,12 @@ public class FieldSubsetReaderTests extends ESTestCase {
 
         // see no fields
         LeafReader segmentReader = ir.leaves().get(0).reader();
-        Fields f = segmentReader.fields();
-        assertNotNull(f); // 5.x contract
         Set<String> seenFields = new HashSet<>();
-        for (String field : segmentReader.fields()) {
-            seenFields.add(field);
+        for (FieldInfo info : segmentReader.getFieldInfos()) {
+            seenFields.add(info.name);
         }
         assertEquals(0, seenFields.size());
+        assertNull(segmentReader.terms("foo"));
 
         // see no vectors
         assertNull(segmentReader.getTermVectors(0));
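Note: with LeafReader.fields() gone, the tests enumerate visible field names through getFieldInfos() (FieldInfos is iterable over FieldInfo) and probe postings per field with terms(field), which returns null for fields the subset reader hides. A small sketch of that enumeration; the visibleFields helper is hypothetical.

import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;

class FieldEnumerationExample {
    // Collects the field names a (possibly field-filtered) LeafReader exposes.
    static Set<String> visibleFields(LeafReader reader) {
        Set<String> seen = new HashSet<>();
        for (FieldInfo info : reader.getFieldInfos()) {
            seen.add(info.name);
        }
        return seen;
    }
}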