Use the primary_term field to identify parent documents (#27469)
This change stops indexing the `_primary_term` field for nested (child) documents, which allows fast retrieval of parent documents. Until now we created a doc-values field for children as well, to ensure a dense data structure on disk. Yet, since the primary term is only used to tie-break between operations that carry the same sequence number during indexing, a dense data structure is less important. We can use this to improve the performance and memory footprint of nested documents: once only parents carry the field, a doc-values-exists query on `_primary_term` identifies them directly. Relates to #24362
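The idea in a nutshell: a nested block is indexed children-first with the root (parent) document last, and after this change only the parent carries the `_primary_term` doc-values field, so a doc-values-exists query on that field matches exactly the parents. A minimal, self-contained sketch of the trick against Lucene 7-era APIs (directory choice, analyzer, and term value are illustrative, not the actual mapper code):

import java.util.Arrays;

import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ParentDocsSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()))) {
            Document child1 = new Document();
            Document child2 = new Document();
            Document parent = new Document();
            // only the parent, the last document of the block, gets the doc-values field
            parent.add(new NumericDocValuesField("_primary_term", 1L));
            // addDocuments indexes the block atomically: children first, parent last
            writer.addDocuments(Arrays.asList(child1, child2, parent));
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // matches only documents that have the doc-values field, i.e. the parents
            int parents = searcher.count(new DocValuesFieldExistsQuery("_primary_term"));
            System.out.println("parents: " + parents); // prints 1
        }
    }
}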
parent 6319424e4a
commit 5a0b6d1977
@@ -25,13 +25,16 @@ import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.TypeFieldMapper;

 import java.util.List;

@@ -62,13 +65,20 @@ public class Queries {
         return new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")));
     }

-    public static Query newNonNestedFilter() {
-        // TODO: this is slow, make it a positive query
+    /**
+     * Creates a new non-nested docs query
+     * @param indexVersionCreated the index version created since newer indices can identify a parent field more efficiently
+     */
+    public static Query newNonNestedFilter(Version indexVersionCreated) {
+        if (indexVersionCreated.onOrAfter(Version.V_7_0_0_alpha1)) {
+            return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME);
+        } else {
             return new BooleanQuery.Builder()
                 .add(new MatchAllDocsQuery(), Occur.FILTER)
                 .add(newNestedFilter(), Occur.MUST_NOT)
                 .build();
+        }
     }

     public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) {
         BooleanQuery.Builder builder = new BooleanQuery.Builder();
@@ -249,7 +249,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
         }

         if (hasNested) {
-            warmUp.add(Queries.newNonNestedFilter());
+            warmUp.add(Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated()));
         }

         final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
@@ -28,6 +28,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -252,13 +253,19 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
         // we share the parent docs fields to ensure good compression
         SequenceIDFields seqID = context.seqID();
         assert seqID != null;
-        for (int i = 1; i < context.docs().size(); i++) {
+        int numDocs = context.docs().size();
+        final Version versionCreated = context.mapperService().getIndexSettings().getIndexVersionCreated();
+        final boolean includePrimaryTerm = versionCreated.before(Version.V_7_0_0_alpha1);
+        for (int i = 1; i < numDocs; i++) {
             final Document doc = context.docs().get(i);
             doc.add(seqID.seqNo);
             doc.add(seqID.seqNoDocValue);
+            if (includePrimaryTerm) {
+                // primary terms are used to distinguish between parent and nested docs since 6.1.0
                 doc.add(seqID.primaryTerm);
+            }
         }
     }

     @Override
     protected String contentType() {
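Why density no longer matters: the primary term is consulted only to tie-break two operations that arrive with the same sequence number, a check that runs against the parent document of a block, never its children. A hypothetical sketch of such a tie-break, illustrative only and not the actual engine code:

    // Two operations conflict; the newer sequence number wins, and the primary
    // term only breaks ties between operations with the same sequence number.
    static boolean operationWins(long seqNo, long primaryTerm, long otherSeqNo, long otherPrimaryTerm) {
        if (seqNo != otherSeqNo) {
            return seqNo > otherSeqNo;
        }
        return primaryTerm > otherPrimaryTerm;
    }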
@@ -156,7 +156,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
                 .anyMatch(indexType::equals)) {
             if (context.getMapperService().hasNested()) {
                 // type filters are expected not to match nested docs
-                return Queries.newNonNestedFilter();
+                return Queries.newNonNestedFilter(context.indexVersionCreated());
             } else {
                 return new MatchAllDocsQuery();
             }
@@ -282,7 +282,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
         Query innerQuery;
         ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
         if (objectMapper == null) {
-            parentFilter = context.bitsetFilter(Queries.newNonNestedFilter());
+            parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated()));
         } else {
             parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter());
         }

@@ -377,7 +377,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
             SearchHit hit = hits[i];
             Query rawParentFilter;
             if (parentObjectMapper == null) {
-                rawParentFilter = Queries.newNonNestedFilter();
+                rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
             } else {
                 rawParentFilter = parentObjectMapper.nestedTypeFilter();
             }
@@ -71,7 +71,7 @@ final class ShardSplittingQuery extends Query {
         }
         this.indexMetaData = indexMetaData;
         this.shardId = shardId;
-        this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer() : null;
+        this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null;
     }
     @Override
     public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {

@@ -336,9 +336,9 @@ final class ShardSplittingQuery extends Query {
      * than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is
      * executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either.
      */
-    private static BitSetProducer newParentDocBitSetProducer() {
+    private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
         return context -> {
-            Query query = Queries.newNonNestedFilter();
+            Query query = Queries.newNonNestedFilter(indexVersionCreated);
             final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
             final IndexSearcher searcher = new IndexSearcher(topLevelContext);
             searcher.setQueryCache(null);
@@ -270,7 +270,7 @@ final class DefaultSearchContext extends SearchContext {
             && typeFilter == null // when a _type filter is set, it will automatically exclude nested docs
             && new NestedHelper(mapperService()).mightMatchNestedDocs(query)
             && (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) {
-            filters.add(Queries.newNonNestedFilter());
+            filters.add(Queries.newNonNestedFilter(mapperService().getIndexSettings().getIndexVersionCreated()));
         }

         if (aliasFilter != null) {
@@ -62,7 +62,9 @@ class NestedAggregator extends BucketsAggregator implements SingleBucketAggregat
             List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
             boolean collectsFromSingleBucket) throws IOException {
         super(name, factories, context, parentAggregator, pipelineAggregators, metaData);
-        Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter() : Queries.newNonNestedFilter();
+
+        Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter()
+            : Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
         this.parentFilter = context.bitsetFilterCache().getBitSetProducer(parentFilter);
         this.childFilter = childObjectMapper.nestedTypeFilter();
         this.collectsFromSingleBucket = collectsFromSingleBucket;
@@ -54,7 +54,7 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single
             throws IOException {
         super(name, factories, context, parent, pipelineAggregators, metaData);
         if (objectMapper == null) {
-            parentFilter = Queries.newNonNestedFilter();
+            parentFilter = Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
         } else {
             parentFilter = objectMapper.nestedTypeFilter();
         }
@@ -181,7 +181,9 @@ public class FetchPhase implements SearchPhase {

     private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {
         if (context.mapperService().hasNested()) {
-            BitSet bits = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()).getBitSet(subReaderContext);
+            BitSet bits = context.bitsetFilterCache()
+                .getBitSetProducer(Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()))
+                .getBitSet(subReaderContext);
             if (!bits.get(subDocId)) {
                 return bits.nextSetBit(subDocId);
             }

@@ -345,7 +347,7 @@ public class FetchPhase implements SearchPhase {
                 }
                 parentFilter = nestedParentObjectMapper.nestedTypeFilter();
             } else {
-                parentFilter = Queries.newNonNestedFilter();
+                parentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
             }

             Query childFilter = nestedObjectMapper.nestedTypeFilter();
@@ -212,7 +212,7 @@ public abstract class SortBuilder<T extends SortBuilder<T>> implements NamedWrit
             Query parentQuery;
             ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
             if (objectMapper == null) {
-                parentQuery = Queries.newNonNestedFilter();
+                parentQuery = Queries.newNonNestedFilter(context.indexVersionCreated());
             } else {
                 parentQuery = objectMapper.nestedTypeFilter();
             }
@@ -19,15 +19,28 @@

 package org.apache.lucene.search;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;

 public class QueriesTests extends ESTestCase {

     public void testNonNestedQuery() {
+        for (Version version : VersionUtils.allVersions()) {
             // This is a custom query that extends AutomatonQuery and want to make sure the equals method works
-            assertEquals(Queries.newNonNestedFilter(), Queries.newNonNestedFilter());
-            assertEquals(Queries.newNonNestedFilter().hashCode(), Queries.newNonNestedFilter().hashCode());
+            assertEquals(Queries.newNonNestedFilter(version), Queries.newNonNestedFilter(version));
+            assertEquals(Queries.newNonNestedFilter(version).hashCode(), Queries.newNonNestedFilter(version).hashCode());
+            if (version.onOrAfter(Version.V_7_0_0_alpha1)) {
+                assertEquals(Queries.newNonNestedFilter(version), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME));
+            } else {
+                assertEquals(Queries.newNonNestedFilter(version), new BooleanQuery.Builder()
+                    .add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER)
+                    .add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT)
+                    .build());
+            }
+        }
     }

 }
@@ -44,6 +44,7 @@ import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.test.VersionUtils;
 import org.mockito.Mockito;

 import java.io.IOException;
@@ -58,14 +59,16 @@ public class TypeFieldTypeTests extends FieldTypeTestCase {

     public void testTermsQueryWhenTypesAreDisabled() throws Exception {
         QueryShardContext context = Mockito.mock(QueryShardContext.class);
+        Version indexVersionCreated = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
         Settings indexSettings = Settings.builder()
-                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build();
         IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build();
         IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
         Mockito.when(context.getIndexSettings()).thenReturn(mockSettings);
+        Mockito.when(context.indexVersionCreated()).thenReturn(indexVersionCreated);

         MapperService mapperService = Mockito.mock(MapperService.class);
         Set<String> types = Collections.emptySet();

@@ -84,7 +87,7 @@ public class TypeFieldTypeTests extends FieldTypeTestCase {

         Mockito.when(mapperService.hasNested()).thenReturn(true);
         query = ft.termQuery("my_type", context);
-        assertEquals(Queries.newNonNestedFilter(), query);
+        assertEquals(Queries.newNonNestedFilter(context.indexVersionCreated()), query);

         types = Collections.singleton("other_type");
         Mockito.when(mapperService.types()).thenReturn(types);
@@ -39,6 +39,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.test.ESTestCase;
@@ -51,6 +52,7 @@ import java.util.List;
 public class ShardSplittingQueryTests extends ESTestCase {

     public void testSplitOnID() throws IOException {
+        SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         Directory dir = newFSDirectory(createTempDir());
         final int numDocs = randomIntBetween(50, 100);
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

@@ -76,13 +78,15 @@ public class ShardSplittingQueryTests extends ESTestCase {
                 }
                 docs.add(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
                 writer.addDocuments(docs);
             } else {
                 writer.addDocument(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
             }
         }
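The test fixture above now attaches the primary-term field to every root document (the following hunks repeat the same pattern), since ShardSplittingQuery identifies parents through `Queries.newNonNestedFilter` and would otherwise treat the roots as nested children. Assuming the era's `SeqNoFieldMapper` internals, the helper field amounts to a numeric doc-values entry, roughly:

    // Assumption: SequenceIDFields.emptySeqID() exposes its primaryTerm member as
    // a doc-values field on _primary_term, which is what marks a block parent.
    Field primaryTerm = new NumericDocValuesField(SeqNoFieldMapper.PRIMARY_TERM_NAME, 0L);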
@@ -95,6 +99,7 @@ public class ShardSplittingQueryTests extends ESTestCase {
     }

     public void testSplitOnRouting() throws IOException {
+        SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         Directory dir = newFSDirectory(createTempDir());
         final int numDocs = randomIntBetween(50, 100);
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@@ -122,14 +127,16 @@ public class ShardSplittingQueryTests extends ESTestCase {
                 docs.add(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
                     new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
                 writer.addDocuments(docs);
             } else {
                 writer.addDocument(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
                     new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
             }
         }
@@ -140,6 +147,7 @@ public class ShardSplittingQueryTests extends ESTestCase {
     }

     public void testSplitOnIdOrRouting() throws IOException {
+        SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         Directory dir = newFSDirectory(createTempDir());
         final int numDocs = randomIntBetween(50, 100);
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@@ -160,13 +168,15 @@ public class ShardSplittingQueryTests extends ESTestCase {
                 rootDoc = Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
                     new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 );
             } else {
                 shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null);
                 rootDoc = Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 );
             }

@@ -194,6 +204,7 @@ public class ShardSplittingQueryTests extends ESTestCase {


     public void testSplitOnRoutingPartitioned() throws IOException {
+        SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         Directory dir = newFSDirectory(createTempDir());
         final int numDocs = randomIntBetween(50, 100);
         RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@@ -223,14 +234,16 @@ public class ShardSplittingQueryTests extends ESTestCase {
                 docs.add(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
                     new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
                 writer.addDocuments(docs);
             } else {
                 writer.addDocument(Arrays.asList(
                     new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
                     new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
-                    new SortedNumericDocValuesField("shard_id", shardId)
+                    new SortedNumericDocValuesField("shard_id", shardId),
+                    sequenceIDFields.primaryTerm
                 ));
             }
         }
@@ -102,6 +102,7 @@ public class DefaultSearchContextTests extends ESTestCase {
         IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build();
         IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
         when(indexService.getIndexSettings()).thenReturn(indexSettings);
+        when(mapperService.getIndexSettings()).thenReturn(indexSettings);

         BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());

@@ -36,11 +36,13 @@ import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;

@@ -56,6 +58,7 @@ import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
 import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValueType;
+import org.elasticsearch.test.VersionUtils;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -73,6 +76,9 @@ public class NestedAggregatorTests extends AggregatorTestCase {
     private static final String MAX_AGG_NAME = "maxAgg";
     private static final String SUM_AGG_NAME = "sumAgg";

+    private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
+
+
     public void testNoDocs() throws IOException {
         try (Directory directory = newDirectory()) {
             try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
@@ -120,6 +126,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                     UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test",
                     TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);
             }

@@ -168,6 +175,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                     UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test",
                     TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);
             }

@@ -216,6 +224,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                     UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test",
                     TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);
             }
@@ -254,6 +263,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
     public void testResetRootDocId() throws Exception {
         IndexWriterConfig iwc = new IndexWriterConfig(null);
         iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+        SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         try (Directory directory = newDirectory()) {
             try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc)) {
                 List<Document> documents = new ArrayList<>();
@@ -274,6 +284,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                 document = new Document();
                 document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);
                 iw.commit();

@@ -288,6 +299,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                 document = new Document();
                 document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);
                 documents.clear();

@@ -299,6 +311,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                 document = new Document();
                 document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 documents.add(document);
                 iw.addDocuments(documents);

@@ -314,7 +327,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
             fieldType.setName(VALUE_FIELD_NAME);

             BooleanQuery.Builder bq = new BooleanQuery.Builder();
-            bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST);
+            bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST);
             bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), BooleanClause.Occur.MUST_NOT);

             Nested nested = search(newSearcher(indexReader, false, true),
@@ -550,6 +563,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
                 Document document = new Document();
                 document.add(new Field(UidFieldMapper.NAME, "book#" + id, UidFieldMapper.Defaults.FIELD_TYPE));
                 document.add(new Field(TypeFieldMapper.NAME, "book", TypeFieldMapper.Defaults.FIELD_TYPE));
+                document.add(sequenceIDFields.primaryTerm);
                 for (String author : authors) {
                     document.add(new SortedSetDocValuesField("author", new BytesRef(author)));
                 }
@@ -29,6 +29,7 @@ import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
+import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
@@ -108,6 +109,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
                     TypeFieldMapper.Defaults.FIELD_TYPE));
                 long value = randomNonNegativeLong() % 10000;
                 document.add(new SortedNumericDocValuesField(VALUE_FIELD_NAME, value));
+                document.add(SeqNoFieldMapper.SequenceIDFields.emptySeqID().primaryTerm);
                 if (numNestedDocs > 0) {
                     expectedMaxValue = Math.max(expectedMaxValue, value);
                     expectedParentDocs++;
@@ -29,6 +29,7 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.BitSetIterator;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.search.SearchHit;

@@ -65,7 +66,11 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
         for (PercolateQuery percolateQuery : percolateQueries) {
             String fieldName = singlePercolateQuery ? FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
             IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
-            Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(), false);
+            // there is a bug in lucene's MemoryIndex that doesn't allow us to use docValues here...
+            // See https://issues.apache.org/jira/browse/LUCENE-8055
+            // for now we just use version 6.0 version to find nested parent
+            final Version version = Version.V_6_0_0; //context.mapperService().getIndexSettings().getIndexVersionCreated();
+            Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(version), false);
             Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
             int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
             BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc);
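Pinning the version to 6.0 above forces `newNonNestedFilter` down its legacy BooleanQuery branch, because the percolator evaluates queries against a MemoryIndex whose doc-values handling was broken at the time (LUCENE-8055). Roughly:

    // Illustrative: any version before V_7_0_0_alpha1 yields the MatchAll/MUST_NOT
    // fallback rather than a DocValuesFieldExistsQuery that the MemoryIndex could
    // not serve correctly.
    Query parentFilter = Queries.newNonNestedFilter(Version.V_6_0_0);
    assert parentFilter instanceof BooleanQuery;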