percolator: removed the `.percolator` type; instead, a field of type `percolator` should be configured before indexing percolator queries

* Added an extra `field` parameter to the `percolator` query to indicate what percolator field should be used. This must be an existing field in the mapping of type `percolator`.
* The `.percolator` type is now forbidden (just like any other type that starts with a `.`).

This only applies to new indices created on 5.0 and later. In indices created on earlier versions, the `.percolator` type is still allowed to exist.
The new `percolator` field type isn't active in such indices, and the `PercolatorQueryCache` knows how to load queries from these legacy indices.
For these legacy indices, the `PercolatorQueryBuilder` will not enforce that the `field` parameter is of type `percolator`.
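For illustration, a minimal sketch of the new workflow (not part of this commit; the index, type, and field names are made up, and `client()` is the usual test-style client helper):

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.percolatorQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;

// 1) map a field of type `percolator` before indexing any percolator queries
client().admin().indices().preparePutMapping("test").setType("doctype")
    .setSource(jsonBuilder().startObject()
        .startObject("properties")
            .startObject("query").field("type", "percolator").endObject()
        .endObject()
    .endObject())
    .get();

// 2) index a percolator query into that field
client().prepareIndex("test", "doctype", "1")
    .setSource(jsonBuilder().startObject().field("query", termQuery("field", "value")).endObject())
    .get();

// 3) percolate a document by pointing the `percolator` query at the mapped field
client().prepareSearch("test")
    .setQuery(percolatorQuery("query", "doctype",
        jsonBuilder().startObject().field("field", "value").endObject().bytes()))
    .get();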
Martijn van Groningen 2016-04-05 11:53:28 +02:00
parent a2ab13ddd1
commit 40c22fc654
32 changed files with 1336 additions and 722 deletions

View File

@@ -20,10 +20,13 @@ package org.elasticsearch.action.percolate;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -53,7 +56,9 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class TransportPercolateAction extends HandledTransportAction<PercolateRequest, PercolateResponse> {
@@ -194,7 +199,8 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
searchSource.field("size", 0);
}
PercolatorQueryBuilder percolatorQueryBuilder = new PercolatorQueryBuilder(percolateRequest.documentType(), documentSource);
PercolatorQueryBuilder percolatorQueryBuilder =
new PercolatorQueryBuilder("query", percolateRequest.documentType(), documentSource);
if (querySource != null) {
try (XContentParser parser = XContentHelper.createParser(querySource)) {
QueryParseContext queryParseContext = new QueryParseContext(queryRegistry, parser, parseFieldMatcher);
@@ -236,9 +242,15 @@ public class TransportPercolateAction extends HandledTransportAction<PercolateRe
}
}
List<ShardOperationFailedException> shardFailures = new ArrayList<>(searchResponse.getShardFailures().length);
for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
shardFailures.add(new DefaultShardOperationFailedException(shardSearchFailure.index(), shardSearchFailure.shardId(),
shardSearchFailure.getCause()));
}
return new PercolateResponse(
searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(),
Arrays.asList(searchResponse.getShardFailures()), matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations()
shardFailures, matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations()
);
}

View File

@@ -23,7 +23,9 @@ import com.carrotsearch.hppc.ObjectHashSet;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.document.FieldType;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
@@ -104,7 +106,6 @@ public class MapperService extends AbstractIndexComponent {
private final boolean dynamic;
private volatile String defaultMappingSource;
private volatile String defaultPercolatorMappingSource;
private volatile Map<String, DocumentMapper> mappers = emptyMap();
@@ -137,15 +138,6 @@
this.mapperRegistry = mapperRegistry;
this.dynamic = this.indexSettings.getValue(INDEX_MAPPER_DYNAMIC_SETTING);
defaultPercolatorMappingSource = "{\n" +
"\"_default_\":{\n" +
"\"properties\" : {\n" +
"\"query\" : {\n" +
"\"type\" : \"percolator\"\n" +
"}\n" +
"}\n" +
"}\n" +
"}";
if (index().getName().equals(ScriptService.SCRIPT_INDEX)){
defaultMappingSource = "{" +
"\"_default_\": {" +
@@ -160,7 +152,7 @@
}
if (logger.isTraceEnabled()) {
logger.trace("using dynamic[{}], default mapping source[{}], default percolator mapping source[{}]", dynamic, defaultMappingSource, defaultPercolatorMappingSource);
logger.trace("using dynamic[{}], default mapping source[{}]", dynamic, defaultMappingSource);
} else if (logger.isDebugEnabled()) {
logger.debug("using dynamic[{}]", dynamic);
}
@@ -288,6 +280,7 @@
checkNestedFieldsLimit(fullPathObjectMappers);
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size());
checkDepthLimit(fullPathObjectMappers.keySet());
checkPercolatorFieldLimit(fieldTypes);
}
Set<String> parentTypes = this.parentTypes;
@@ -337,7 +330,12 @@
}
private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
return mapper.type().startsWith(".") && !PercolatorFieldMapper.TYPE_NAME.equals(mapper.type());
boolean legacyIndex = getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_alpha1);
if (legacyIndex) {
return mapper.type().startsWith(".") && !PercolatorFieldMapper.LEGACY_TYPE_NAME.equals(mapper.type());
} else {
return mapper.type().startsWith(".");
}
}
private boolean assertSerialization(DocumentMapper mapper) {
@@ -445,13 +443,26 @@
}
}
public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
String defaultMappingSource;
if (PercolatorFieldMapper.TYPE_NAME.equals(mappingType)) {
defaultMappingSource = this.defaultPercolatorMappingSource;
} else {
defaultMappingSource = this.defaultMappingSource;
/**
* We only allow up to 1 percolator field per index.
*
* The reasoning is that the PercolatorQueryCache only supports a single percolator query per document.
* Also, specifying multiple queries per document feels like an anti-pattern.
*/
private void checkPercolatorFieldLimit(Iterable<MappedFieldType> fieldTypes) {
List<String> percolatorFieldTypes = new ArrayList<>();
for (MappedFieldType fieldType : fieldTypes) {
if (fieldType instanceof PercolatorFieldMapper.PercolatorFieldType) {
percolatorFieldTypes.add(fieldType.name());
}
}
if (percolatorFieldTypes.size() > 1) {
throw new IllegalArgumentException("Up to one percolator field type is allowed per index, " +
"found the following percolator fields [" + percolatorFieldTypes + "]");
}
}
public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null);
}

View File

@@ -57,6 +57,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
/**
@@ -234,6 +235,9 @@
* Creates a boolean query with a should clause for each term on all fields of the specified index reader.
*/
public static Query createQueryTermsQuery(IndexReader indexReader, String queryMetadataField, String unknownQueryField) throws IOException {
Objects.requireNonNull(queryMetadataField);
Objects.requireNonNull(unknownQueryField);
List<Term> extractedTerms = new ArrayList<>();
extractedTerms.add(new Term(unknownQueryField));
Fields fields = MultiFields.getFields(indexReader);
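A rough sketch of how this helper is driven at percolate time (the `MemoryIndex` wiring below is illustrative, and the `query.*` names stand in for the sub-fields the percolator field mapper creates):

import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.Query;

// the document being percolated is indexed into an in-memory index; its terms
// select candidate queries, and the unknown_query marker term additionally
// selects queries whose terms could not be extracted
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("message", "the quick brown fox", new WhitespaceAnalyzer());
IndexReader docReader = memoryIndex.createSearcher().getIndexReader();
Query candidateQuery = ExtractQueryTermsService.createQueryTermsQuery(
    docReader, "query.extracted_terms", "query.unknown_query");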

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
@@ -48,53 +49,55 @@ import java.util.Map;
public class PercolatorFieldMapper extends FieldMapper {
public static final String TYPE_NAME = ".percolator";
public static final String NAME = "query";
@Deprecated
public static final String LEGACY_TYPE_NAME = ".percolator";
public static final String CONTENT_TYPE = "percolator";
public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType();
private static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType();
private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query";
private static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";
public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME;
public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." + UNKNOWN_QUERY_FIELD_NAME;
public static final String QUERY_BUILDER_FULL_FIELD_NAME = NAME + "." + QUERY_BUILDER_FIELD_NAME;
static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";
public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {
private final QueryShardContext queryShardContext;
public Builder(QueryShardContext queryShardContext) {
super(NAME, FIELD_TYPE, FIELD_TYPE);
public Builder(String fieldName, QueryShardContext queryShardContext) {
super(fieldName, FIELD_TYPE, FIELD_TYPE);
this.queryShardContext = queryShardContext;
}
@Override
public PercolatorFieldMapper build(BuilderContext context) {
context.path().add(name);
KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context);
KeywordFieldMapper unknownQueryField = createExtractQueryFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context);
BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder().build(context);
context.path().add(name());
KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME, context);
((PercolatorFieldType) fieldType).queryTermsField = extractedTermsField.fieldType();
KeywordFieldMapper unknownQueryField = createExtractQueryFieldBuilder(UNKNOWN_QUERY_FIELD_NAME, context);
((PercolatorFieldType) fieldType).unknownQueryField = unknownQueryField.fieldType();
BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder(context);
((PercolatorFieldType) fieldType).queryBuilderField = queryBuilderField.fieldType();
context.path().remove();
return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField, queryBuilderField);
setupFieldType(context);
return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(),
multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField,
unknownQueryField, queryBuilderField);
}
static KeywordFieldMapper.Builder createExtractQueryFieldBuilder(String name) {
static KeywordFieldMapper createExtractQueryFieldBuilder(String name, BuilderContext context) {
KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name);
queryMetaDataFieldBuilder.docValues(false);
queryMetaDataFieldBuilder.store(false);
queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS);
return queryMetaDataFieldBuilder;
return queryMetaDataFieldBuilder.build(context);
}
static BinaryFieldMapper.Builder createQueryBuilderFieldBuilder() {
static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) {
BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME);
builder.docValues(true);
builder.indexOptions(IndexOptions.NONE);
builder.store(false);
builder.fieldType().setDocValuesType(DocValuesType.BINARY);
return builder;
return builder.build(context);
}
}
@@ -102,21 +105,39 @@ public class PercolatorFieldMapper extends FieldMapper {
@Override
public Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
return new Builder(parserContext.queryShardContext());
return new Builder(name, parserContext.queryShardContext());
}
}
public static final class PercolatorFieldType extends MappedFieldType {
public static class PercolatorFieldType extends MappedFieldType {
private MappedFieldType queryTermsField;
private MappedFieldType unknownQueryField;
private MappedFieldType queryBuilderField;
public PercolatorFieldType() {
setName(NAME);
setIndexOptions(IndexOptions.NONE);
setDocValuesType(DocValuesType.NONE);
setStored(false);
}
public PercolatorFieldType(MappedFieldType ref) {
public PercolatorFieldType(PercolatorFieldType ref) {
super(ref);
queryTermsField = ref.queryTermsField;
unknownQueryField = ref.unknownQueryField;
queryBuilderField = ref.queryBuilderField;
}
public String getExtractedTermsField() {
return queryTermsField.name();
}
public String getUnknownQueryFieldName() {
return unknownQueryField.name();
}
public String getQueryBuilderFieldName() {
return queryBuilderField.name();
}
@Override
@@ -132,9 +153,9 @@ public class PercolatorFieldMapper extends FieldMapper {
private final boolean mapUnmappedFieldAsString;
private final QueryShardContext queryShardContext;
private final KeywordFieldMapper queryTermsField;
private final KeywordFieldMapper unknownQueryField;
private final BinaryFieldMapper queryBuilderField;
private KeywordFieldMapper queryTermsField;
private KeywordFieldMapper unknownQueryField;
private BinaryFieldMapper queryBuilderField;
public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext,
@@ -151,6 +172,16 @@ public class PercolatorFieldMapper extends FieldMapper {
@Override
public Mapper parse(ParseContext context) throws IOException {
QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
DocumentMapper documentMapper = queryShardContext.getMapperService().documentMapper(context.type());
for (FieldMapper fieldMapper : documentMapper.mappers()) {
if (fieldMapper instanceof PercolatorFieldMapper) {
PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
if (context.doc().getField(fieldType.getQueryBuilderFieldName()) != null) {
throw new IllegalArgumentException("a document can only contain one percolator query");
}
}
}
XContentParser parser = context.parser();
QueryBuilder<?> queryBuilder = parseQueryBuilder(queryShardContext.newParseContext(parser), parser.getTokenLocation());
// Fetching of terms, shapes and indexed scripts happen during this rewrite:

View File

@@ -30,8 +30,6 @@ import org.apache.lucene.search.Query;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.PercolatorQuery;
import org.elasticsearch.search.SearchParseElement;
@@ -80,20 +78,20 @@ public class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
createSubSearchContext(context, percolatorLeafReaderContext, percolatorQuery.getDocumentSource());
for (InternalSearchHit hit : hits) {
if (PercolatorFieldMapper.TYPE_NAME.equals(hit.getType())) {
LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
Query query = queriesRegistry.getQueries(ctx).getQuery(hit.docId() - ctx.docBase);
LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
int segmentDocId = hit.docId() - ctx.docBase;
Query query = queriesRegistry.getQueries(ctx).getQuery(segmentDocId);
if (query != null) {
subSearchContext.parsedQuery(new ParsedQuery(query));
hitContext.reset(
new InternalSearchHit(0, "unknown", new Text(percolatorQuery.getDocumentType()), Collections.emptyMap()),
percolatorLeafReaderContext, 0, percolatorIndexSearcher
new InternalSearchHit(0, "unknown", new Text(percolatorQuery.getDocumentType()), Collections.emptyMap()),
percolatorLeafReaderContext, 0, percolatorIndexSearcher
);
hitContext.cache().clear();
highlightPhase.hitExecute(subSearchContext, hitContext);
hit.highlightFields().putAll(hitContext.hit().getHighlightFields());
}
}
}
@Override

View File

@@ -28,7 +28,10 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -46,8 +49,10 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.index.IndexWarmer.TerminationHandle;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine.Searcher;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.query.PercolatorQuery;
@@ -63,6 +68,10 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.function.Supplier;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.LEGACY_TYPE_NAME;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.PercolatorFieldType;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.parseQuery;
public final class PercolatorQueryCache extends AbstractIndexComponent
implements Closeable, LeafReader.CoreClosedListener, PercolatorQuery.QueryRegistry {
@@ -107,7 +116,7 @@ public final class PercolatorQueryCache extends AbstractIndexComponent
executor.execute(() -> {
try {
final long start = System.nanoTime();
QueriesLeaf queries = loadQueries(ctx, indexShard.indexSettings().getIndexVersionCreated());
QueriesLeaf queries = loadQueries(ctx, indexShard);
cache.put(ctx.reader().getCoreCacheKey(), queries);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace(
@@ -127,7 +136,9 @@ public final class PercolatorQueryCache extends AbstractIndexComponent
};
}
QueriesLeaf loadQueries(LeafReaderContext context, Version indexVersionCreated) throws IOException {
QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
MapperService mapperService = indexShard.mapperService();
LeafReader leafReader = context.reader();
ShardId shardId = ShardUtils.extractShardId(leafReader);
if (shardId == null) {
@@ -135,29 +146,48 @@ public final class PercolatorQueryCache extends AbstractIndexComponent
}
if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
// percolator cache insanity
String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " + indexSettings.getIndex();
String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " +
indexSettings.getIndex();
throw new IllegalStateException(message);
}
IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME), PostingsEnum.NONE);
if (postings != null) {
if (legacyLoading) {
if (legacyLoading) {
PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME), PostingsEnum.NONE);
if (postings != null) {
LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
leafReader.document(docId, visitor);
queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
visitor.source = null; // reset
}
} else {
BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME);
if (binaryDocValues != null) {
for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
BytesRef queryBuilder = binaryDocValues.get(docId);
if (queryBuilder.length > 0) {
queries.put(docId, parseQueryBuilder(docId, queryBuilder));
}
} else {
// Each type can have one percolator field mapper,
// so for each type we check whether there is a percolator field mapper
// and parse all the queries for the documents of that type.
IndexSearcher indexSearcher = new IndexSearcher(leafReader);
for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
for (FieldMapper fieldMapper : documentMapper.mappers()) {
if (fieldMapper instanceof PercolatorFieldMapper) {
PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(fieldType.getQueryBuilderFieldName());
if (binaryDocValues != null) {
// use the same leaf reader context the indexSearcher is using too:
Scorer scorer = queryWeight.scorer(leafReader.getContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
for (int docId = iterator.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
BytesRef qbSource = binaryDocValues.get(docId);
if (qbSource.length > 0) {
queries.put(docId, parseQueryBuilder(docId, qbSource));
}
}
}
}
break;
}
}
}
@@ -166,11 +196,11 @@ public final class PercolatorQueryCache extends AbstractIndexComponent
return new QueriesLeaf(shardId, queries);
}
private Query parseQueryBuilder(int docId, BytesRef queryBuilder) {
private Query parseQueryBuilder(int docId, BytesRef qbSource) {
XContent xContent = QUERY_BUILDER_CONTENT_TYPE.xContent();
try (XContentParser sourceParser = xContent.createParser(queryBuilder.bytes, queryBuilder.offset, queryBuilder.length)) {
try (XContentParser sourceParser = xContent.createParser(qbSource.bytes, qbSource.offset, qbSource.length)) {
QueryShardContext context = queryShardContextSupplier.get();
return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
} catch (IOException e) {
throw new PercolatorException(index(), "failed to parse query builder for document [" + docId + "]", e);
}
@@ -189,7 +219,7 @@ public final class PercolatorQueryCache extends AbstractIndexComponent
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
QueryShardContext context = queryShardContextSupplier.get();
return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
} else {
sourceParser.skipChildren();
}

View File

@@ -23,9 +23,11 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
@@ -56,22 +58,19 @@ public final class PercolatorQuery extends Query implements Accountable {
private final IndexSearcher percolatorIndexSearcher;
private Query queriesMetaDataQuery;
private final Query percolateTypeQuery;
private Query percolateTypeQuery;
/**
* @param docType The type of the document being percolated
* @param queryRegistry The registry holding all the percolator queries as Lucene queries.
* @param documentSource The source of the document being percolated
* @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated
* @param percolateTypeQuery A query that identifies all documents containing percolator queries
*/
public Builder(String docType, QueryRegistry queryRegistry, BytesReference documentSource, IndexSearcher percolatorIndexSearcher,
Query percolateTypeQuery) {
public Builder(String docType, QueryRegistry queryRegistry, BytesReference documentSource, IndexSearcher percolatorIndexSearcher) {
this.docType = Objects.requireNonNull(docType);
this.documentSource = Objects.requireNonNull(documentSource);
this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher);
this.queryRegistry = Objects.requireNonNull(queryRegistry);
this.percolateTypeQuery = Objects.requireNonNull(percolateTypeQuery);
}
/**
@@ -87,12 +86,27 @@ public final class PercolatorQuery extends Query implements Accountable {
);
}
/**
* @param percolateTypeQuery A query that identifies all documents containing percolator queries
*/
public void setPercolateTypeQuery(Query percolateTypeQuery) {
this.percolateTypeQuery = Objects.requireNonNull(percolateTypeQuery);
}
public PercolatorQuery build() {
if (percolateTypeQuery != null && queriesMetaDataQuery != null) {
throw new IllegalStateException("Either filter by deprecated percolator type or by query metadata");
}
// The query that selects which percolator queries will be evaluated by MemoryIndex:
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(percolateTypeQuery, FILTER);
if (percolateTypeQuery != null) {
builder.add(percolateTypeQuery, FILTER);
}
if (queriesMetaDataQuery != null) {
builder.add(queriesMetaDataQuery, FILTER);
}
return new PercolatorQuery(docType, queryRegistry, documentSource, builder.build(), percolatorIndexSearcher);
}
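A hedged sketch of how a caller is now expected to drive the builder; `queryRegistry`, `documentSource`, `memoryIndexSearcher`, and `legacyIndex` stand in for values the caller already has:

// exactly one of the two candidate filters may be set before build();
// setting both triggers the IllegalStateException above
PercolatorQuery.Builder builder =
    new PercolatorQuery.Builder("doctype", queryRegistry, documentSource, memoryIndexSearcher);
if (legacyIndex) {
    // pre-5.0 index: select documents of the deprecated `.percolator` type
    builder.setPercolateTypeQuery(new TermQuery(new Term(TypeFieldMapper.NAME, ".percolator")));
} else {
    // 5.0+ index: select candidates via the terms extracted from the indexed queries
    builder.extractQueryTermsQuery("query.extracted_terms", "query.unknown_query");
}
PercolatorQuery query = builder.build();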

View File

@@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.analysis.FieldNameAnalyzer;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
@@ -72,6 +73,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
public static final ParseField QUERY_NAME_FIELD = new ParseField(NAME);
static final ParseField DOCUMENT_FIELD = new ParseField("document");
private static final ParseField QUERY_FIELD = new ParseField("field");
private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type");
private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index");
private static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type");
@@ -80,6 +82,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
private static final ParseField INDEXED_DOCUMENT_FIELD_PREFERENCE = new ParseField("preference");
private static final ParseField INDEXED_DOCUMENT_FIELD_VERSION = new ParseField("version");
private final String field;
private final String documentType;
private final BytesReference document;
@@ -90,13 +93,17 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
private final String indexedDocumentPreference;
private final Long indexedDocumentVersion;
public PercolatorQueryBuilder(String documentType, BytesReference document) {
public PercolatorQueryBuilder(String field, String documentType, BytesReference document) {
if (field == null) {
throw new IllegalArgumentException("[field] is a required argument");
}
if (documentType == null) {
throw new IllegalArgumentException("[document_type] is a required argument");
}
if (document == null) {
throw new IllegalArgumentException("[document] is a required argument");
}
this.field = field;
this.documentType = documentType;
this.document = document;
indexedDocumentIndex = null;
@@ -107,9 +114,12 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
indexedDocumentVersion = null;
}
public PercolatorQueryBuilder(String documentType, String indexedDocumentIndex, String indexedDocumentType,
public PercolatorQueryBuilder(String field, String documentType, String indexedDocumentIndex, String indexedDocumentType,
String indexedDocumentId, String indexedDocumentRouting, String indexedDocumentPreference,
Long indexedDocumentVersion) {
if (field == null) {
throw new IllegalArgumentException("[field] is a required argument");
}
if (documentType == null) {
throw new IllegalArgumentException("[document_type] is a required argument");
}
@@ -122,6 +132,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
if (indexedDocumentId == null) {
throw new IllegalArgumentException("[id] is a required argument");
}
this.field = field;
this.documentType = documentType;
this.indexedDocumentIndex = indexedDocumentIndex;
this.indexedDocumentType = indexedDocumentType;
@@ -137,6 +148,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
*/
public PercolatorQueryBuilder(StreamInput in) throws IOException {
super(in);
field = in.readString();
documentType = in.readString();
indexedDocumentIndex = in.readOptionalString();
indexedDocumentType = in.readOptionalString();
@@ -153,6 +165,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeString(field);
out.writeString(documentType);
out.writeOptionalString(indexedDocumentIndex);
out.writeOptionalString(indexedDocumentType);
@@ -172,6 +185,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.field(DOCUMENT_TYPE_FIELD.getPreferredName(), documentType);
builder.field(QUERY_FIELD.getPreferredName(), field);
if (document != null) {
XContentType contentType = XContentFactory.xContentType(document);
if (contentType == builder.contentType()) {
@@ -212,6 +226,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String field = null;
String documentType = null;
String indexedDocumentIndex = null;
@@ -242,7 +257,9 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
"] query does not support [" + token + "]");
}
} else if (token.isValue()) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, DOCUMENT_TYPE_FIELD)) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
field = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, DOCUMENT_TYPE_FIELD)) {
documentType = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_INDEX)) {
indexedDocumentIndex = parser.text();
@@ -277,9 +294,9 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
PercolatorQueryBuilder queryBuilder;
if (source != null) {
queryBuilder = new PercolatorQueryBuilder(documentType, source);
queryBuilder = new PercolatorQueryBuilder(field, documentType, source);
} else if (indexedDocumentId != null) {
queryBuilder = new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType,
queryBuilder = new PercolatorQueryBuilder(field, documentType, indexedDocumentIndex, indexedDocumentType,
indexedDocumentId, indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion);
} else {
throw new IllegalArgumentException("[" + PercolatorQueryBuilder.NAME + "] query, nothing to percolate");
@@ -291,7 +308,8 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
@Override
protected boolean doEquals(PercolatorQueryBuilder other) {
return Objects.equals(documentType, other.documentType)
return Objects.equals(field, other.field)
&& Objects.equals(documentType, other.documentType)
&& Objects.equals(document, other.document)
&& Objects.equals(indexedDocumentIndex, other.indexedDocumentIndex)
&& Objects.equals(indexedDocumentType, other.indexedDocumentType)
@@ -300,7 +318,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
@Override
protected int doHashCode() {
return Objects.hash(documentType, document, indexedDocumentIndex, indexedDocumentType, indexedDocumentId);
return Objects.hash(field, documentType, document, indexedDocumentIndex, indexedDocumentType, indexedDocumentId);
}
@Override
@@ -327,7 +345,7 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
"indexed document [{}/{}/{}] couldn't be found", indexedDocumentIndex, indexedDocumentType, indexedDocumentId
);
}
return new PercolatorQueryBuilder(documentType, getResponse.getSourceAsBytesRef());
return new PercolatorQueryBuilder(field, documentType, getResponse.getSourceAsBytesRef());
}
@Override
@@ -378,19 +396,33 @@ public class PercolatorQueryBuilder extends AbstractQueryBuilder<PercolatorQuery
throw new QueryShardException(context, "no percolator query registry");
}
Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME));
PercolatorQuery.Builder builder = new PercolatorQuery.Builder(
documentType, registry, document, docSearcher, percolateTypeQuery
documentType, registry, document, docSearcher
);
Settings indexSettings = registry.getIndexSettings().getSettings();
if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0_alpha1)) {
builder.extractQueryTermsQuery(
PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME
);
MappedFieldType fieldType = context.fieldMapper(field);
if (fieldType == null) {
throw new QueryShardException(context, "field [" + field + "] does not exist");
}
if (!(fieldType instanceof PercolatorFieldMapper.PercolatorFieldType)) {
throw new QueryShardException(context, "expected field [" + field +
"] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
}
PercolatorFieldMapper.PercolatorFieldType pft = (PercolatorFieldMapper.PercolatorFieldType) fieldType;
builder.extractQueryTermsQuery(pft.getExtractedTermsField(), pft.getUnknownQueryFieldName());
} else {
Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME));
builder.setPercolateTypeQuery(percolateTypeQuery);
}
return builder.build();
}
public String getField() {
return field;
}
public String getDocumentType() {
return documentType;
}
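On the wire the query now carries the new `field` parameter. A rough round-trip sketch (document values are illustrative; `XContentBuilder`/`ToXContent` from org.elasticsearch.common.xcontent):

PercolatorQueryBuilder queryBuilder = new PercolatorQueryBuilder("query", "doctype",
    jsonBuilder().startObject().field("message", "hello world").endObject().bytes());
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
queryBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
// prints roughly:
// { "percolator" : { "document_type" : "doctype", "field" : "query",
//                    "document" : { "message" : "hello world" }, "boost" : 1.0 } }
System.out.println(builder.string());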

View File

@@ -840,15 +840,21 @@ public abstract class QueryBuilders {
return new ExistsQueryBuilder(name);
}
public static PercolatorQueryBuilder percolatorQuery(String documentType, BytesReference document) {
return new PercolatorQueryBuilder(documentType, document);
public static PercolatorQueryBuilder percolatorQuery(String queryField, String documentType, BytesReference document) {
return new PercolatorQueryBuilder(queryField, documentType, document);
}
public static PercolatorQueryBuilder percolatorQuery(String documentType, String indexedDocumentIndex,
public static PercolatorQueryBuilder percolatorQuery(String queryField, String documentType, String indexedDocumentIndex,
String indexedDocumentType, String indexedDocumentId) {
return new PercolatorQueryBuilder(queryField, documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
null, null, null);
}
public static PercolatorQueryBuilder percolatorQuery(String queryField, String documentType, String indexedDocumentIndex,
String indexedDocumentType, String indexedDocumentId,
String indexedDocumentRouting, String indexedDocumentPreference,
Long indexedDocumentVersion) {
return new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
return new PercolatorQueryBuilder(queryField, documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion);
}
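And the indexed-document variant in use, a sketch with illustrative index/type/id values:

// percolate a document that is already stored, referencing it instead of passing its source inline
QueryBuilder query = QueryBuilders.percolatorQuery("query", "doctype", "docs", "doctype", "1");
SearchResponse response = client().prepareSearch("test").setQuery(query).get();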

View File

@@ -19,56 +19,132 @@
package org.elasticsearch.index.percolator;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
import java.io.IOException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.percolator.PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.termsLookupQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
private String typeName;
private String fieldName;
private IndexService indexService;
private MapperService mapperService;
private PercolatorFieldMapper.PercolatorFieldType fieldType;
@Before
public void init() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
indexService = createIndex("test", Settings.EMPTY);
mapperService = indexService.mapperService();
String mapper = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "text").endObject().endObject()
.endObject().endObject().string();
String mapper = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("field").field("type", "text").endObject()
.startObject("number_field").field("type", "long").endObject()
.startObject("date_field").field("type", "date").endObject()
.endObject().endObject().endObject().string();
mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
}
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorFieldMapper.TYPE_NAME)
.startObject("properties").startObject("query").field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge(PercolatorFieldMapper.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
private void addQueryMapping() throws Exception {
typeName = randomAsciiOfLength(4);
fieldName = randomAsciiOfLength(4);
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
.startObject("properties").startObject(fieldName).field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
fieldType = (PercolatorFieldMapper.PercolatorFieldType) mapperService.fullName(fieldName);
}
public void testPercolatorFieldMapper() throws Exception {
ParsedDocument doc = mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.field("query", termQuery("field", "value"))
addQueryMapping();
QueryBuilder queryBuilder = termQuery("field", "value");
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.field(fieldName, queryBuilder)
.endObject().bytes());
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(1));
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME)[0].binaryValue().utf8ToString(), equalTo("field\0value"));
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME).length, equalTo(1));
assertThat(doc.rootDoc().getFields(fieldType.getUnknownQueryFieldName()).length, equalTo(0));
assertThat(doc.rootDoc().getFields(fieldType.getExtractedTermsField()).length, equalTo(1));
assertThat(doc.rootDoc().getFields(fieldType.getExtractedTermsField())[0].binaryValue().utf8ToString(), equalTo("field\0value"));
assertThat(doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName()).length, equalTo(1));
BytesRef qbSource = doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName())[0].binaryValue();
assertQueryBuilder(qbSource, queryBuilder);
// add a query from which we don't extract terms
queryBuilder = matchAllQuery();
doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.field(fieldName, queryBuilder)
.endObject().bytes());
assertThat(doc.rootDoc().getFields(fieldType.getUnknownQueryFieldName()).length, equalTo(1));
assertThat(doc.rootDoc().getFields(fieldType.getUnknownQueryFieldName())[0].binaryValue(), equalTo(new BytesRef()));
assertThat(doc.rootDoc().getFields(fieldType.getExtractedTermsField()).length, equalTo(0));
assertThat(doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName()).length, equalTo(1));
qbSource = doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName())[0].binaryValue();
assertQueryBuilder(qbSource, queryBuilder);
}
public void testStoringQueries() throws Exception {
addQueryMapping();
QueryBuilder[] queries = new QueryBuilder[]{
termQuery("field", "value"), matchAllQuery(), matchQuery("field", "value"), matchPhraseQuery("field", "value"),
prefixQuery("field", "v"), wildcardQuery("field", "v*"), rangeQuery("number_field").gte(0).lte(9),
rangeQuery("date_field").from("2015-01-01T00:00").to("2015-01-01T00:00")
};
// note: it is important that range queries never rewrite, otherwise it will cause results to be wrong.
// (it can't use shard data for rewriting purposes, because percolator queries run on MemoryIndex)
for (QueryBuilder query : queries) {
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.field(fieldName, query)
.endObject().bytes());
BytesRef qbSource = doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName())[0].binaryValue();
assertQueryBuilder(qbSource, query);
}
}
public void testQueryWithRewrite() throws Exception {
addQueryMapping();
client().prepareIndex("remote", "type", "1").setSource("field", "value").get();
QueryBuilder queryBuilder = termsLookupQuery("field", new TermsLookup("remote", "type", "1", "field"));
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.field(fieldName, queryBuilder)
.endObject().bytes());
BytesRef qbSource = doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName())[0].binaryValue();
assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext()));
}
public void testPercolatorFieldMapperUnMappedField() throws Exception {
addQueryMapping();
MapperParsingException exception = expectThrows(MapperParsingException.class, () -> {
mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.field("query", termQuery("unmapped_field", "value"))
mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.field(fieldName, termQuery("unmapped_field", "value"))
.endObject().bytes());
});
assertThat(exception.getCause(), instanceOf(QueryShardException.class));
@@ -77,13 +153,14 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
public void testPercolatorFieldMapper_noQuery() throws Exception {
ParsedDocument doc = mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
addQueryMapping();
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.endObject().bytes());
assertThat(doc.rootDoc().getFields(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME).length, equalTo(0));
assertThat(doc.rootDoc().getFields(fieldType.getQueryBuilderFieldName()).length, equalTo(0));
try {
mapperService.documentMapper(PercolatorFieldMapper.TYPE_NAME).parse("test", PercolatorFieldMapper.TYPE_NAME, "1", XContentFactory.jsonBuilder().startObject()
.nullField("query")
mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject()
.nullField(fieldName)
.endObject().bytes());
} catch (MapperParsingException e) {
assertThat(e.getDetailedMessage(), containsString("query malformed, must start with start_object"));
@@ -91,18 +168,91 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
}
public void testAllowNoAdditionalSettings() throws Exception {
addQueryMapping();
IndexService indexService = createIndex("test1", Settings.EMPTY);
MapperService mapperService = indexService.mapperService();
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(PercolatorFieldMapper.TYPE_NAME)
.startObject("properties").startObject("query").field("type", "percolator").field("index", "no").endObject().endObject()
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
.startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
.endObject().endObject().string();
try {
mapperService.merge(PercolatorFieldMapper.TYPE_NAME, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
fail("MapperParsingException expected");
} catch (MapperParsingException e) {
assertThat(e.getMessage(), equalTo("Mapping definition for [query] has unsupported parameters: [index : no]"));
assertThat(e.getMessage(), equalTo("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
}
}
// only one percolator field is allowed per index; merging a mapping with multiple percolator fields is expected to fail
public void testMultiplePercolatorFields() throws Exception {
String typeName = "another_type";
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
.startObject("properties")
.startObject("query_field1").field("type", "percolator").endObject()
.startObject("query_field2").field("type", "percolator").endObject()
.endObject()
.endObject().endObject().string();
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
});
assertThat(exception.getMessage(), equalTo("Up to one percolator field type is allowed per index, " +
"found the following percolator fields [[query_field1, query_field2]]"));
}
// a percolator field can be nested under an object field, but only one query can be specified per document
public void testNestedPercolatorField() throws Exception {
String typeName = "another_type";
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject(typeName)
.startObject("properties")
.startObject("object_field")
.field("type", "object")
.startObject("properties")
.startObject("query_field").field("type", "percolator").endObject()
.endObject()
.endObject()
.endObject()
.endObject().endObject().string();
mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
QueryBuilder queryBuilder = matchQuery("field", "value");
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
jsonBuilder().startObject().startObject("object_field")
.field("query_field", queryBuilder)
.endObject().endObject().bytes()
);
assertThat(doc.rootDoc().getFields().size(), equalTo(18)); // also includes all other meta fields
BytesRef queryBuilderAsBytes = doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue();
assertQueryBuilder(queryBuilderAsBytes, queryBuilder);
doc = mapperService.documentMapper(typeName).parse("test", typeName, "1",
jsonBuilder().startObject()
.startArray("object_field")
.startObject().field("query_field", queryBuilder).endObject()
.endArray()
.endObject().bytes()
);
assertThat(doc.rootDoc().getFields().size(), equalTo(18)); // also includes all other meta fields
queryBuilderAsBytes = doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue();
assertQueryBuilder(queryBuilderAsBytes, queryBuilder);
MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
mapperService.documentMapper(typeName).parse("test", typeName, "1",
jsonBuilder().startObject()
.startArray("object_field")
.startObject().field("query_field", queryBuilder).endObject()
.startObject().field("query_field", queryBuilder).endObject()
.endArray()
.endObject().bytes()
);
}
);
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
assertThat(e.getCause().getMessage(), equalTo("a document can only contain one percolator query"));
}
private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException {
XContentParser sourceParser = QUERY_BUILDER_CONTENT_TYPE.xContent().createParser(actual.bytes, actual.offset, actual.length);
QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser);
assertThat(qsc.parseInnerQueryBuilder(), equalTo(expected));
}
}

View File

@@ -42,7 +42,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testHitsExecutionNeeded() {
PercolatorQuery percolatorQuery = new PercolatorQuery.Builder("", ctx -> null, new BytesArray("{}"),
Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery())
Mockito.mock(IndexSearcher.class))
.build();
PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(null);
@@ -61,7 +61,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
public void testLocatePercolatorQuery() {
PercolatorQuery percolatorQuery = new PercolatorQuery.Builder("", ctx -> null, new BytesArray("{}"),
Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery())
Mockito.mock(IndexSearcher.class))
.build();
assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()), nullValue());

View File

@@ -19,7 +19,9 @@
package org.elasticsearch.index.percolator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -56,6 +58,8 @@ import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
@@ -134,6 +138,7 @@ public class PercolatorQueryCacheTests extends ESTestCase {
boolean legacyFormat = randomBoolean();
Version version = legacyFormat ? Version.V_2_0_0 : Version.CURRENT;
IndexShard indexShard = mockIndexShard(version, legacyFormat);
storeQuery("0", indexWriter, termQuery("field1", "value1"), true, legacyFormat);
storeQuery("1", indexWriter, wildcardQuery("field1", "v*"), true, legacyFormat);
@@ -161,7 +166,7 @@ public class PercolatorQueryCacheTests extends ESTestCase {
initialize("field1", "type=keyword", "field2", "type=keyword", "field3", "type=keyword");
PercolatorQueryCache.QueriesLeaf leaf = cache.loadQueries(indexReader.leaves().get(0), version);
PercolatorQueryCache.QueriesLeaf leaf = cache.loadQueries(indexReader.leaves().get(0), indexShard);
assertThat(leaf.queries.size(), equalTo(5));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("field1", "value1"))));
assertThat(leaf.getQuery(1), equalTo(new WildcardQuery(new Term("field1", "v*"))));
@@ -212,7 +217,7 @@ public class PercolatorQueryCacheTests extends ESTestCase {
assertThat(e.getMessage(), equalTo("queries not loaded, queries should be have been preloaded during index warming..."));
}
IndexShard indexShard = mockIndexShard();
IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
ThreadPool threadPool = mockThreadPool();
IndexWarmer.Listener listener = cache.createListener(threadPool);
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
@@ -259,7 +264,7 @@ public class PercolatorQueryCacheTests extends ESTestCase {
initialize("a", "type=keyword");
IndexShard indexShard = mockIndexShard();
IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
ThreadPool threadPool = mockThreadPool();
IndexWarmer.Listener listener = cache.createListener(threadPool);
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
@@ -312,7 +317,11 @@ public class PercolatorQueryCacheTests extends ESTestCase {
Document doc = new Document();
doc.add(new StringField("id", id, Field.Store.NO));
if (typeField) {
doc.add(new StringField(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME, Field.Store.NO));
if (legacy) {
doc.add(new StringField(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME, Field.Store.NO));
} else {
doc.add(new StringField(TypeFieldMapper.NAME, "query", Field.Store.NO));
}
}
if (legacy) {
BytesReference percolatorQuery = XContentFactory.jsonBuilder().startObject()
@@ -326,12 +335,12 @@ public class PercolatorQueryCacheTests extends ESTestCase {
BytesRef queryBuilderAsBytes = new BytesRef(
XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE).value(queryBuilder).bytes().toBytes()
);
doc.add(new BinaryDocValuesField(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME, queryBuilderAsBytes));
doc.add(new BinaryDocValuesField(PercolatorFieldMapper.QUERY_BUILDER_FIELD_NAME, queryBuilderAsBytes));
}
indexWriter.addDocument(doc);
}
IndexShard mockIndexShard() {
IndexShard mockIndexShard(Version version, boolean legacyFormat) {
IndexShard indexShard = mock(IndexShard.class);
ShardIndexWarmerService shardIndexWarmerService = mock(ShardIndexWarmerService.class);
when(shardIndexWarmerService.logger()).thenReturn(logger);
@ -340,11 +349,37 @@ public class PercolatorQueryCacheTests extends ESTestCase {
IndexMetaData.builder("_index").settings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_VERSION_CREATED, version)
).build(),
Settings.EMPTY
);
when(indexShard.indexSettings()).thenReturn(indexSettings);
PercolatorFieldMapper.PercolatorFieldType fieldType = mock(PercolatorFieldMapper.PercolatorFieldType.class);
when(fieldType.name()).thenReturn("query");
when(fieldType.getQueryBuilderFieldName()).thenReturn(PercolatorFieldMapper.QUERY_BUILDER_FIELD_NAME);
PercolatorFieldMapper percolatorFieldMapper = mock(PercolatorFieldMapper.class);
when(percolatorFieldMapper.fieldType()).thenReturn(fieldType);
MapperService mapperService = mock(MapperService.class);
DocumentMapper documentMapper = mock(DocumentMapper.class);
if (legacyFormat) {
when(documentMapper.type()).thenReturn(PercolatorFieldMapper.LEGACY_TYPE_NAME);
when(documentMapper.typeFilter())
.thenReturn(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME)));
} else {
when(documentMapper.type()).thenReturn("query");
when(documentMapper.typeFilter()).thenReturn(new TermQuery(new Term(TypeFieldMapper.NAME, "query")));
}
Analyzer analyzer = new SimpleAnalyzer();
DocumentFieldMappers documentFieldMappers =
new DocumentFieldMappers(Collections.singleton(percolatorFieldMapper), analyzer, analyzer, analyzer);
when(documentMapper.mappers()).thenReturn(documentFieldMappers);
when(mapperService.docMappers(false)).thenReturn(Collections.singleton(documentMapper));
when(indexShard.mapperService()).thenReturn(mapperService);
return indexShard;
}

View File

@ -24,29 +24,37 @@ import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.script.Script;
import org.hamcrest.Matchers;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.Collections;
import java.util.Set;
import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class PercolatorQueryBuilderTests extends AbstractQueryTestCase<PercolatorQueryBuilder> {
private static final Set<String> SHUFFLE_PROTECTED_FIELDS = singleton(PercolatorQueryBuilder.DOCUMENT_FIELD.getPreferredName());
private static final Set<String> SHUFFLE_PROTECTED_FIELDS =
Collections.singleton(PercolatorQueryBuilder.DOCUMENT_FIELD.getPreferredName());
private static String queryField;
private static String docType;
private String indexedDocumentIndex;
private String indexedDocumentType;
private String indexedDocumentId;
@ -57,13 +65,25 @@ public class PercolatorQueryBuilderTests extends AbstractQueryTestCase<Percolato
boolean indexedDocumentExists = true;
@BeforeClass
public static void before() throws Exception {
queryField = randomAsciiOfLength(4);
docType = randomAsciiOfLength(4);
MapperService mapperService = createShardContext().getMapperService();
mapperService.merge("query_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("query_type",
queryField, "type=percolator"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
STRING_FIELD_NAME, "type=text"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
}
@Override
protected PercolatorQueryBuilder doCreateTestQueryBuilder() {
return doCreateTestQueryBuilder(randomBoolean());
}
private PercolatorQueryBuilder doCreateTestQueryBuilder(boolean indexedDocument) {
String docType = randomAsciiOfLength(4);
documentSource = randomSource();
if (indexedDocument) {
indexedDocumentIndex = randomAsciiOfLength(4);
@ -72,10 +92,10 @@ public class PercolatorQueryBuilderTests extends AbstractQueryTestCase<Percolato
indexedDocumentRouting = randomAsciiOfLength(4);
indexedDocumentPreference = randomAsciiOfLength(4);
indexedDocumentVersion = (long) randomIntBetween(0, Integer.MAX_VALUE);
return new PercolatorQueryBuilder(docType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
return new PercolatorQueryBuilder(queryField, docType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId,
indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion);
} else {
return new PercolatorQueryBuilder(docType, documentSource);
return new PercolatorQueryBuilder(queryField, docType, documentSource);
}
}
@ -120,28 +140,20 @@ public class PercolatorQueryBuilderTests extends AbstractQueryTestCase<Percolato
@Override
public void testMustRewrite() throws IOException {
PercolatorQueryBuilder pqb = doCreateTestQueryBuilder(true);
try {
pqb.toQuery(createShardContext());
fail("IllegalStateException expected");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), equalTo("query builder must be rewritten first"));
}
IllegalStateException e = expectThrows(IllegalStateException.class, () -> pqb.toQuery(createShardContext()));
assertThat(e.getMessage(), equalTo("query builder must be rewritten first"));
QueryBuilder<?> rewrite = pqb.rewrite(createShardContext());
PercolatorQueryBuilder geoShapeQueryBuilder = new PercolatorQueryBuilder(pqb.getDocumentType(), documentSource);
PercolatorQueryBuilder geoShapeQueryBuilder = new PercolatorQueryBuilder(pqb.getField(), pqb.getDocumentType(), documentSource);
assertEquals(geoShapeQueryBuilder, rewrite);
}
public void testIndexedDocumentDoesNotExist() throws IOException {
indexedDocumentExists = false;
PercolatorQueryBuilder pqb = doCreateTestQueryBuilder(true);
try {
pqb.rewrite(createShardContext());
fail("ResourceNotFoundException expected");
} catch (ResourceNotFoundException e) {
String expectedString = "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentType + "/" +
indexedDocumentId + "] couldn't be found";
assertThat(e.getMessage() , equalTo(expectedString));
}
ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> pqb.rewrite(createShardContext()));
String expectedString = "indexed document [" + indexedDocumentIndex + "/" + indexedDocumentType + "/" +
indexedDocumentId + "] couldn't be found";
assertThat(e.getMessage() , equalTo(expectedString));
}
// overwrite this test, because adding bogus field to the document part is valid and that would make the test fail
@ -172,51 +184,47 @@ public class PercolatorQueryBuilderTests extends AbstractQueryTestCase<Percolato
}
public void testRequiredParameters() {
try {
QueryBuilders.percolatorQuery(null, new BytesArray("{}"));
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[document_type] is a required argument"));
}
try {
QueryBuilders.percolatorQuery("_document_type", null);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[document] is a required argument"));
}
try {
QueryBuilders.percolatorQuery(null, "_index", "_type", "_id", null, null, null);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[document_type] is a required argument"));
}
try {
QueryBuilders.percolatorQuery("_document_type", null, "_type", "_id", null, null, null);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[index] is a required argument"));
}
try {
QueryBuilders.percolatorQuery("_document_type", "_index", null, "_id", null, null, null);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[type] is a required argument"));
}
try {
QueryBuilders.percolatorQuery("_document_type", "_index", "_type", null, null, null, null);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[id] is a required argument"));
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery(null, null, new BytesArray("{}"));
});
assertThat(e.getMessage(), equalTo("[field] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> QueryBuilders.percolatorQuery("_field", null, new BytesArray("{}")));
assertThat(e.getMessage(), equalTo("[document_type] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> QueryBuilders.percolatorQuery("_field", "_document_type", null));
assertThat(e.getMessage(), equalTo("[document] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery(null, null, "_index", "_type", "_id", null, null, null);
});
assertThat(e.getMessage(), equalTo("[field] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery("_field", null, "_index", "_type", "_id", null, null, null);
});
assertThat(e.getMessage(), equalTo("[document_type] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery("_field", "_document_type", null, "_type", "_id", null, null, null);
});
assertThat(e.getMessage(), equalTo("[index] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery("_field", "_document_type", "_index", null, "_id", null, null, null);
});
assertThat(e.getMessage(), equalTo("[type] is a required argument"));
e = expectThrows(IllegalArgumentException.class, () -> {
QueryBuilders.percolatorQuery("_field", "_document_type", "_index", "_type", null, null, null, null);
});
assertThat(e.getMessage(), equalTo("[id] is a required argument"));
}
public void testFromJsonNoDocumentType() throws IOException {
try {
parseQuery("{\"percolator\" : { \"document\": {}}");
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[percolator] query is missing required [document_type] parameter"));
}
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> parseQuery("{\"percolator\" : { \"document\": {}}"));
assertThat(e.getMessage(), equalTo("[percolator] query is missing required [document_type] parameter"));
}
private static BytesReference randomSource() {

View File

@ -54,6 +54,7 @@ import org.apache.lucene.store.Directory;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.ExtractQueryTermsService;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
@ -147,8 +148,7 @@ public class PercolatorQueryTests extends ESTestCase {
"docType",
queryRegistry,
new BytesArray("{}"),
percolateSearcher,
new MatchAllDocsQuery()
percolateSearcher
);
builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
// no scoring, wrapping it in a constant score query:
@ -222,8 +222,7 @@ public class PercolatorQueryTests extends ESTestCase {
"docType",
queryRegistry,
new BytesArray("{}"),
percolateSearcher,
new MatchAllDocsQuery()
percolateSearcher
);
builder.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
Query query = builder.build();
@ -326,7 +325,7 @@ public class PercolatorQueryTests extends ESTestCase {
ParseContext.Document document = new ParseContext.Document();
ExtractQueryTermsService.extractQueryTerms(query, document, EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME,
EXTRACTED_TERMS_FIELD_TYPE);
document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorFieldMapper.TYPE_NAME, id)));
document.add(new StoredField(UidFieldMapper.NAME, Uid.createUid(PercolatorFieldMapper.LEGACY_TYPE_NAME, id)));
assert extraFields.length % 2 == 0;
for (int i = 0; i < extraFields.length; i++) {
document.add(new StringField(extraFields[i], extraFields[++i], Field.Store.NO));
@ -340,8 +339,7 @@ public class PercolatorQueryTests extends ESTestCase {
"docType",
queryRegistry,
new BytesArray("{}"),
percolateSearcher,
new MatchAllDocsQuery()
percolateSearcher
);
// enables the optimization that prevents queries from being evaluated that don't match
builder1.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
@ -351,9 +349,9 @@ public class PercolatorQueryTests extends ESTestCase {
"docType",
queryRegistry,
new BytesArray("{}"),
percolateSearcher,
new MatchAllDocsQuery()
percolateSearcher
);
builder2.setPercolateTypeQuery(new MatchAllDocsQuery());
TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10);
assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits));
assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length));

View File

@ -75,7 +75,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
}
public void testSpecifiedIndexUnavailableMultipleIndices() throws Exception {
createIndex("test1");
assertAcked(prepareCreate("test1").addMapping("query", "query", "type=percolator"));
ensureYellow();
// Verify defaults
@ -136,7 +136,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getSettings("test1", "test2").setIndicesOptions(options), false);
options = IndicesOptions.strictExpandOpen();
assertAcked(prepareCreate("test2"));
assertAcked(prepareCreate("test2").addMapping("query", "query", "type=percolator"));
ensureYellow();
verify(search("test1", "test2").setIndicesOptions(options), false);
verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
@ -158,7 +158,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
}
public void testSpecifiedIndexUnavailableSingleIndexThatIsClosed() throws Exception {
assertAcked(prepareCreate("test1"));
assertAcked(prepareCreate("test1").addMapping("query", "query", "type=percolator"));
// we need to wait until all shards are allocated since recovery from
// gateway will fail unless the majority of the replicas was allocated
// pre-closing. with lots of replicas this will fail.
@ -264,7 +264,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getMapping("test1").setIndicesOptions(options), false);
verify(getSettings("test1").setIndicesOptions(options), false);
assertAcked(prepareCreate("test1"));
assertAcked(prepareCreate("test1").addMapping("query", "query", "type=percolator"));
ensureYellow();
options = IndicesOptions.strictExpandOpenAndForbidClosed();
@ -357,7 +357,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
verify(getMapping(indices).setIndicesOptions(options), false);
verify(getSettings(indices).setIndicesOptions(options), false);
assertAcked(prepareCreate("foobar"));
assertAcked(prepareCreate("foobar").addMapping("query", "query", "type=percolator"));
client().prepareIndex("foobar", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
// Verify defaults for wildcards, with one wildcard expression and one existing index

View File

@ -26,7 +26,6 @@ import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ESIntegTestCase;
@ -55,97 +54,105 @@ import static org.hamcrest.Matchers.nullValue;
/**
*/
public class MultiPercolatorIT extends ESIntegTestCase {
private final static String INDEX_NAME = "queries";
private final static String TYPE_NAME = "query";
public void testBasics() throws Exception {
assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text"));
assertAcked(prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("type", "field1", "type=text"));
ensureGreen();
logger.info("--> register a queries");
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "2")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "3")
.setSource(jsonBuilder().startObject().field("query", boolQuery()
.must(matchQuery("field1", "b"))
.must(matchQuery("field1", "c"))
).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "4")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
refresh();
MultiPercolateResponse response = client().prepareMultiPercolate()
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
.add(client().preparePercolate() // non existing doc, so error element
.setIndices("test").setDocumentType("type")
.setGetRequest(Requests.getRequest("test").type("type").id("5")))
.setIndices(INDEX_NAME).setDocumentType("type")
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5")))
.execute().actionGet();
MultiPercolateResponse.Item item = response.getItems()[0];
assertMatchCount(item.getResponse(), 2L);
assertThat(item.getResponse().getMatches(), arrayWithSize(2));
assertThat(item.getErrorMessage(), nullValue());
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4"));
item = response.getItems()[1];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 2L);
assertThat(item.getResponse().getMatches(), arrayWithSize(2));
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4"));
item = response.getItems()[2];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 4L);
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4"));
item = response.getItems()[3];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 1L);
assertThat(item.getResponse().getMatches(), arrayWithSize(1));
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContaining("4"));
item = response.getItems()[4];
assertThat(item.getResponse(), nullValue());
assertThat(item.getErrorMessage(), notNullValue());
assertThat(item.getErrorMessage(), containsString("[test/type/5] doesn't exist"));
assertThat(item.getErrorMessage(), containsString("[" + INDEX_NAME + "/type/5] doesn't exist"));
}
public void testWithRouting() throws Exception {
assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text"));
assertAcked(prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("type", "field1", "type=text"));
ensureGreen();
logger.info("--> register a queries");
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
.setRouting("a")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "2")
.setRouting("a")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "3")
.setRouting("a")
.setSource(jsonBuilder().startObject().field("query", boolQuery()
.must(matchQuery("field1", "b"))
.must(matchQuery("field1", "c"))
).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "4")
.setRouting("a")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
@ -153,69 +160,69 @@ public class MultiPercolatorIT extends ESIntegTestCase {
MultiPercolateResponse response = client().prepareMultiPercolate()
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setRouting("a")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setRouting("a")
.setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setRouting("a")
.setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
.add(client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setRouting("a")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
.add(client().preparePercolate() // non existing doc, so error element
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setRouting("a")
.setGetRequest(Requests.getRequest("test").type("type").id("5")))
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("5")))
.execute().actionGet();
MultiPercolateResponse.Item item = response.getItems()[0];
assertMatchCount(item.getResponse(), 2L);
assertThat(item.getResponse().getMatches(), arrayWithSize(2));
assertThat(item.getErrorMessage(), nullValue());
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "4"));
item = response.getItems()[1];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 2L);
assertThat(item.getResponse().getMatches(), arrayWithSize(2));
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("2", "4"));
item = response.getItems()[2];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 4L);
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContainingInAnyOrder("1", "2", "3", "4"));
item = response.getItems()[3];
assertThat(item.getErrorMessage(), nullValue());
assertMatchCount(item.getResponse(), 1L);
assertThat(item.getResponse().getMatches(), arrayWithSize(1));
assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
assertThat(convertFromTextArray(item.getResponse().getMatches(), INDEX_NAME), arrayContaining("4"));
item = response.getItems()[4];
assertThat(item.getResponse(), nullValue());
assertThat(item.getErrorMessage(), notNullValue());
assertThat(item.getErrorMessage(), containsString("[test/type/5] doesn't exist"));
assertThat(item.getErrorMessage(), containsString("[" + INDEX_NAME + "/type/5] doesn't exist"));
}
public void testExistingDocsOnly() throws Exception {
createIndex("test");
prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get();
int numQueries = randomIntBetween(50, 100);
logger.info("--> register a queries");
for (int i = 0; i < numQueries; i++) {
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i))
client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
}
client().prepareIndex("test", "type", "1")
client().prepareIndex(INDEX_NAME, "type", "1")
.setSource(jsonBuilder().startObject().field("field", "a"))
.execute().actionGet();
refresh();
@ -225,8 +232,8 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setGetRequest(Requests.getRequest("test").type("type").id("1"))
.setIndices("test").setDocumentType("type")
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1"))
.setIndices(INDEX_NAME).setDocumentType("type")
.setSize(numQueries)
);
}
@ -244,8 +251,9 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setGetRequest(Requests.getRequest("test").type("type").id("2"))
.setIndices("test").setDocumentType("type").setSize(numQueries)
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2"))
.setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries)
);
}
@ -262,14 +270,14 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setGetRequest(Requests.getRequest("test").type("type").id("2"))
.setIndices("test").setDocumentType("type").setSize(numQueries)
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("2"))
.setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries)
);
}
builder.add(
client().preparePercolate()
.setGetRequest(Requests.getRequest("test").type("type").id("1"))
.setIndices("test").setDocumentType("type").setSize(numQueries)
.setGetRequest(Requests.getRequest(INDEX_NAME).type("type").id("1"))
.setIndices(INDEX_NAME).setDocumentType("type").setSize(numQueries)
);
response = builder.execute().actionGet();
@ -280,13 +288,13 @@ public class MultiPercolatorIT extends ESIntegTestCase {
}
public void testWithDocsOnly() throws Exception {
createIndex("test");
prepareCreate(INDEX_NAME).addMapping(TYPE_NAME, "query", "type=percolator").get();
ensureGreen();
int numQueries = randomIntBetween(50, 100);
logger.info("--> register a queries");
for (int i = 0; i < numQueries; i++) {
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i))
client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
}
@ -297,7 +305,7 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setSize(numQueries)
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
}
@ -315,7 +323,7 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setSource("illegal json"));
}
@ -331,13 +339,13 @@ public class MultiPercolatorIT extends ESIntegTestCase {
for (int i = 0; i < numPercolateRequest; i++) {
builder.add(
client().preparePercolate()
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setSource("illegal json"));
}
builder.add(
client().preparePercolate()
.setSize(numQueries)
.setIndices("test").setDocumentType("type")
.setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
response = builder.execute().actionGet();
@ -350,8 +358,8 @@ public class MultiPercolatorIT extends ESIntegTestCase {
public void testNestedMultiPercolation() throws IOException {
initNestedIndexAndPercolation();
MultiPercolateRequestBuilder mpercolate= client().prepareMultiPercolate();
mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company"));
mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company"));
mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company"));
mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices(INDEX_NAME).setDocumentType("company"));
MultiPercolateResponse response = mpercolate.get();
assertEquals(response.getItems()[0].getResponse().getMatches().length, 0);
assertEquals(response.getItems()[1].getResponse().getMatches().length, 1);
@ -361,23 +369,24 @@ public class MultiPercolatorIT extends ESIntegTestCase {
public void testStartTimeIsPropagatedToShardRequests() throws Exception {
// See: https://github.com/elastic/elasticsearch/issues/15908
internalCluster().ensureAtLeastNumDataNodes(2);
client().admin().indices().prepareCreate("test")
client().admin().indices().prepareCreate(INDEX_NAME)
.setSettings(Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
)
.addMapping("type", "date_field", "type=date,format=strict_date_optional_time||epoch_millis")
.addMapping(TYPE_NAME, "query", "type=percolator")
.get();
ensureGreen();
client().prepareIndex("test", ".percolator", "1")
client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", rangeQuery("date_field").lt("now+90d")).endObject())
.setRefresh(true)
.get();
for (int i = 0; i < 32; i++) {
MultiPercolateResponse response = client().prepareMultiPercolate()
.add(client().preparePercolate().setDocumentType("type").setIndices("test")
.add(client().preparePercolate().setDocumentType("type").setIndices(INDEX_NAME)
.setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("date_field", "2015-07-21T10:28:01-07:00")))
.get();
assertThat(response.getItems()[0].getResponse().getCount(), equalTo(1L));
@ -392,10 +401,12 @@ public class MultiPercolatorIT extends ESIntegTestCase {
.startObject("name").field("type", "text").endObject().endObject().endObject().endObject()
.endObject();
assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping));
ensureGreen("nestedindex");
assertAcked(client().admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("company", mapping));
ensureGreen(INDEX_NAME);
client().prepareIndex("nestedindex", PercolatorFieldMapper.TYPE_NAME, "Q").setSource(jsonBuilder().startObject()
client().prepareIndex(INDEX_NAME, TYPE_NAME, "Q").setSource(jsonBuilder().startObject()
.field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(Operator.AND), ScoreMode.Avg)).endObject()).get();
refresh();

View File

@ -52,9 +52,14 @@ import static org.hamcrest.Matchers.notNullValue;
*/
public class PercolatorAggregationsIT extends ESIntegTestCase {
private final static String INDEX_NAME = "queries";
private final static String TYPE_NAME = "query";
// Just test the integration with facets and aggregations, not the facet and aggregation functionality!
public void testAggregations() throws Exception {
assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text", "field2", "type=keyword"));
assertAcked(prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("type", "field1", "type=text", "field2", "type=keyword"));
ensureGreen();
int numQueries = scaledRandomIntBetween(250, 500);
@ -70,7 +75,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
String value = values[i % numUniqueQueries];
expectedCount[i % numUniqueQueries]++;
QueryBuilder queryBuilder = matchQuery("field1", value);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i))
client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute()
.actionGet();
}
@ -79,7 +84,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
for (int i = 0; i < numQueries; i++) {
String value = values[i % numUniqueQueries];
PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
.setIndices("test")
.setIndices(INDEX_NAME)
.setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()))
.setSize(expectedCount[i % numUniqueQueries]);
@ -119,7 +124,9 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
// Just test the integration with facets and aggregations, not the facet and aggregation functionality!
public void testAggregationsAndPipelineAggregations() throws Exception {
assertAcked(prepareCreate("test").addMapping("type", "field1", "type=text", "field2", "type=keyword"));
assertAcked(prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("type", "field1", "type=text", "field2", "type=keyword"));
ensureGreen();
int numQueries = scaledRandomIntBetween(250, 500);
@ -135,7 +142,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
String value = values[i % numUniqueQueries];
expectedCount[i % numUniqueQueries]++;
QueryBuilder queryBuilder = matchQuery("field1", value);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i))
client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute()
.actionGet();
}
@ -144,7 +151,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
for (int i = 0; i < numQueries; i++) {
String value = values[i % numUniqueQueries];
PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
.setIndices("test")
.setIndices(INDEX_NAME)
.setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()))
.setSize(expectedCount[i % numUniqueQueries]);
@ -193,9 +200,11 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
}
public void testSignificantAggs() throws Exception {
client().admin().indices().prepareCreate("test").execute().actionGet();
client().admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.execute().actionGet();
ensureGreen();
PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices(INDEX_NAME).setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject()))
.addAggregation(AggregationBuilders.significantTerms("a").field("field2"));
PercolateResponse response = percolateRequestBuilder.get();
@ -203,7 +212,8 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
}
public void testSingleShardAggregations() throws Exception {
assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1))
assertAcked(prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1))
.addMapping(TYPE_NAME, "query", "type=percolator")
.addMapping("type", "field1", "type=text", "field2", "type=keyword"));
ensureGreen();
@ -213,7 +223,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
for (int i = 0; i < numQueries; i++) {
String value = "value0";
QueryBuilder queryBuilder = matchQuery("field1", value);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, Integer.toString(i))
client().prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? "b" : "a").endObject())
.execute()
.actionGet();
@ -223,7 +233,7 @@ public class PercolatorAggregationsIT extends ESIntegTestCase {
for (int i = 0; i < numQueries; i++) {
String value = "value0";
PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
.setIndices("test")
.setIndices(INDEX_NAME)
.setDocumentType("type")
.setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()))
.setSize(numQueries);

View File

@ -60,7 +60,7 @@ public class PercolatorBackwardsCompatibilityIT extends ESIntegTestCase {
assertThat(state.metaData().indices().get(INDEX_NAME).getUpgradedVersion(), equalTo(Version.CURRENT));
assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().size(), equalTo(2));
assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator"), notNullValue());
// important: verify that the query field in the .percolator mapping is of type object (from 3.0.0 this is of type percolator)
// important: verify that the query field in the .percolator mapping is of type object (from 5.x this is of type percolator)
MappingMetaData mappingMetaData = state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator");
assertThat(XContentMapValues.extractValue("properties.query.type", mappingMetaData.sourceAsMap()), equalTo("object"));
assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().get("message"), notNullValue());

View File

@ -21,8 +21,8 @@ package org.elasticsearch.search.percolator;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.highlight.HighlightBuilder;
@ -42,21 +42,24 @@ import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.hamcrest.Matchers.equalTo;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.startsWith;
public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
public void testPercolatorQuery() throws Exception {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field1", "type=keyword", "field2", "type=keyword")
.addMapping("queries", "query", "type=percolator")
);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex("test", "queries", "1")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex("test", "queries", "2")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject())
.get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
client().prepareIndex("test", "queries", "3")
.setSource(jsonBuilder().startObject().field("query", boolQuery()
.must(matchQuery("field1", "value"))
.must(matchQuery("field2", "value"))
@ -66,7 +69,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
BytesReference source = jsonBuilder().startObject().endObject().bytes();
logger.info("percolating empty doc");
SearchResponse response = client().prepareSearch()
.setQuery(percolatorQuery("type", source))
.setQuery(percolatorQuery("query", "type", source))
.get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
@ -74,7 +77,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
source = jsonBuilder().startObject().field("field1", "value").endObject().bytes();
logger.info("percolating doc with 1 field");
response = client().prepareSearch()
.setQuery(percolatorQuery("type", source))
.setQuery(percolatorQuery("query", "type", source))
.addSort("_uid", SortOrder.ASC)
.get();
assertHitCount(response, 2);
@ -84,7 +87,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
source = jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject().bytes();
logger.info("percolating doc with 2 fields");
response = client().prepareSearch()
.setQuery(percolatorQuery("type", source))
.setQuery(percolatorQuery("query", "type", source))
.addSort("_uid", SortOrder.ASC)
.get();
assertHitCount(response, 3);
@ -93,19 +96,70 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
}
public void testPercolatorQueryExistingDocument() throws Exception {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field1", "type=keyword", "field2", "type=keyword")
.addMapping("queries", "query", "type=percolator")
);
client().prepareIndex("test", "queries", "1")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.get();
client().prepareIndex("test", "queries", "2")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject())
.get();
client().prepareIndex("test", "queries", "3")
.setSource(jsonBuilder().startObject().field("query", boolQuery()
.must(matchQuery("field1", "value"))
.must(matchQuery("field2", "value"))
).endObject()).get();
client().prepareIndex("test", "type", "1").setSource("{}").get();
client().prepareIndex("test", "type", "2").setSource("field1", "value").get();
client().prepareIndex("test", "type", "3").setSource("field1", "value", "field2", "value").get();
client().admin().indices().prepareRefresh().get();
logger.info("percolating empty doc");
SearchResponse response = client().prepareSearch()
.setQuery(percolatorQuery("query", "type", "test", "type", "1"))
.get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
logger.info("percolating doc with 1 field");
response = client().prepareSearch()
.setQuery(percolatorQuery("query", "type", "test", "type", "2"))
.addSort("_uid", SortOrder.ASC)
.get();
assertHitCount(response, 2);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
logger.info("percolating doc with 2 fields");
response = client().prepareSearch()
.setQuery(percolatorQuery("query", "type", "test", "type", "3"))
.addSort("_uid", SortOrder.ASC)
.get();
assertHitCount(response, 3);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
}
public void testPercolatorSpecificQueries() throws Exception {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field1", "type=text", "field2", "type=text")
.addMapping("queries", "query", "type=percolator")
);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex("test", "queries", "1")
.setSource(jsonBuilder().startObject().field("query", commonTermsQuery("field1", "quick brown fox")).endObject())
.get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex("test", "queries", "2")
.setSource(jsonBuilder().startObject().field("query", multiMatchQuery("quick brown fox", "field1", "field2")
.type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).endObject())
.get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
client().prepareIndex("test", "queries", "3")
.setSource(jsonBuilder().startObject().field("query",
spanNearQuery(spanTermQuery("field1", "quick"), 0)
.clause(spanTermQuery("field1", "brown"))
@ -115,7 +169,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
.get();
client().admin().indices().prepareRefresh().get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4")
client().prepareIndex("test", "queries", "4")
.setSource(jsonBuilder().startObject().field("query",
spanNotQuery(
spanNearQuery(spanTermQuery("field1", "quick"), 0)
@ -130,7 +184,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
.get();
// doesn't match
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "5")
client().prepareIndex("test", "queries", "5")
.setSource(jsonBuilder().startObject().field("query",
spanNotQuery(
spanNearQuery(spanTermQuery("field1", "quick"), 0)
@ -150,7 +204,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
.field("field2", "the quick brown fox falls down into the well")
.endObject().bytes();
SearchResponse response = client().prepareSearch()
.setQuery(percolatorQuery("type", source))
.setQuery(percolatorQuery("query", "type", source))
.addSort("_uid", SortOrder.ASC)
.get();
assertHitCount(response, 4);
@ -165,22 +219,30 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
}
public void testPercolatorQueryWithHighlighting() throws Exception {
StringBuilder fieldMapping = new StringBuilder("type=text")
.append(",store=").append(randomBoolean());
if (randomBoolean()) {
fieldMapping.append(",term_vector=with_positions_offsets");
} else if (randomBoolean()) {
fieldMapping.append(",index_options=offsets");
}
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field1", "type=text")
.addMapping("type", "field1", fieldMapping)
.addMapping("queries", "query", "type=percolator")
);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex("test", "queries", "1")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex("test", "queries", "2")
.setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "3")
client().prepareIndex("test", "queries", "3")
.setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "4")
client().prepareIndex("test", "queries", "4")
.setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject())
.execute().actionGet();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "5")
client().prepareIndex("test", "queries", "5")
.setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject())
.execute().actionGet();
client().admin().indices().prepareRefresh().get();
@ -189,7 +251,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
.field("field1", "The quick brown fox jumps over the lazy dog")
.endObject().bytes();
SearchResponse searchResponse = client().prepareSearch()
.setQuery(percolatorQuery("type", document))
.setQuery(percolatorQuery("query", "type", document))
.highlighter(new HighlightBuilder().field("field1"))
.addSort("_uid", SortOrder.ASC)
.get();
@ -210,23 +272,109 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
public void testTakePositionOffsetGapIntoAccount() throws Exception {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field", "type=text,position_increment_gap=5")
.addMapping("queries", "query", "type=percolator")
);
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
client().prepareIndex("test", "queries", "1")
.setSource(jsonBuilder().startObject().field("query",
new MatchPhraseQueryBuilder("field", "brown fox").slop(4)).endObject())
.get();
client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
client().prepareIndex("test", "queries", "2")
.setSource(jsonBuilder().startObject().field("query",
new MatchPhraseQueryBuilder("field", "brown fox").slop(5)).endObject())
.get();
client().admin().indices().prepareRefresh().get();
SearchResponse response = client().prepareSearch().setQuery(
QueryBuilders.percolatorQuery("type", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"))
QueryBuilders.percolatorQuery("query", "type", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"))
).get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("2"));
}
public void testIllegalMappings() throws Exception {
String queryFieldName = randomAsciiOfLength(8);
MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("doc_type", "field", "type=keyword")
.addMapping("query_type1", queryFieldName, "type=percolator")
.addMapping("query_type2", queryFieldName, "type=percolator", "second_query_field", "type=percolator")
.addMapping("query_type3", jsonBuilder().startObject().startObject("query_type3").startObject("properties")
.startObject("object_field")
.field("type", "object")
.startObject("properties")
.startObject(queryFieldName)
.field("type", "percolator")
.endObject()
.endObject()
.endObject()
.endObject().endObject())
);
});
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
assertThat(e.getCause().getMessage(), startsWith("Up to one percolator field type is allowed per index"));
}
public void testWithMultiplePercolatorFields() throws Exception {
String queryFieldName = randomAsciiOfLength(8);
createIndex("test1", client().admin().indices().prepareCreate("test1")
.addMapping("doc_type", "field", "type=keyword")
.addMapping("query_type", queryFieldName, "type=percolator"));
createIndex("test2", client().admin().indices().prepareCreate("test2")
.addMapping("doc_type", "field", "type=keyword")
.addMapping("query_type", jsonBuilder().startObject().startObject("query_type").startObject("properties")
.startObject("object_field")
.field("type", "object")
.startObject("properties")
.startObject(queryFieldName)
.field("type", "percolator")
.endObject()
.endObject()
.endObject()
.endObject().endObject())
);
// Acceptable:
client().prepareIndex("test1", "query_type", "1")
.setSource(jsonBuilder().startObject().field(queryFieldName, matchQuery("field", "value")).endObject())
.get();
client().prepareIndex("test2", "query_type", "1")
.setSource(jsonBuilder().startObject().startObject("object_field")
.field(queryFieldName, matchQuery("field", "value"))
.endObject().endObject())
.get();
client().admin().indices().prepareRefresh().get();
BytesReference source = jsonBuilder().startObject().field("field", "value").endObject().bytes();
SearchResponse response = client().prepareSearch()
.setQuery(percolatorQuery(queryFieldName, "doc_type", source))
.setIndices("test1")
.get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
assertThat(response.getHits().getAt(0).type(), equalTo("query_type"));
assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
response = client().prepareSearch()
.setQuery(percolatorQuery("object_field." + queryFieldName, "doc_type", source))
.setIndices("test2")
.get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
assertThat(response.getHits().getAt(0).type(), equalTo("query_type"));
assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
// Unacceptable:
MapperParsingException e = expectThrows(MapperParsingException.class, () -> {
client().prepareIndex("test2", "query_type", "1")
.setSource(jsonBuilder().startObject().startArray("object_field")
.startObject().field(queryFieldName, matchQuery("field", "value")).endObject()
.startObject().field(queryFieldName, matchQuery("field", "value")).endObject()
.endArray().endObject())
.get();
});
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
assertThat(e.getCause().getMessage(), equalTo("a document can only contain one percolator query"));
}
}

View File

@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
@ -350,8 +349,9 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
client().prepareIndex(INDEX, TYPE, "" + i)
.setSource(source).execute().actionGet();
}
client().admin().indices().preparePutMapping(INDEX).setType("query").setSource("query", "type=percolator").get();
client().prepareIndex(INDEX, PercolatorFieldMapper.TYPE_NAME, "4")
client().prepareIndex(INDEX, "query", "4")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();

View File

@ -219,10 +219,11 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
public void testThatPercolatingExistingDocumentGetRequestContainsContextAndHeaders() throws Exception {
Client client = transportClient();
client.prepareIndex(lookupIndex, ".percolator", "1")
.setSource(
jsonBuilder()
.startObject().startObject("query").startObject("match").field("name", "star wars").endObject().endObject().endObject())
client.admin().indices().preparePutMapping(lookupIndex).setType("query").setSource("query", "type=percolator").get();
client.prepareIndex(lookupIndex, "query", "1")
.setSource(jsonBuilder().startObject()
.startObject("query").startObject("match").field("name", "star wars").endObject().endObject()
.endObject())
.get();
client.prepareIndex(lookupIndex, "type", "1")
.setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject())

View File

@ -41,6 +41,8 @@ Attachment datatype::
which supports indexing `attachments` like Microsoft Office formats, Open
Document formats, ePub, HTML, etc. into an `attachment` datatype.
<<percolator>>:: Accepts queries from the query-dsl
[float]
=== Multi-fields
@ -83,6 +85,8 @@ include::types/text.asciidoc[]
include::types/token-count.asciidoc[]
include::types/percolator.asciidoc[]

View File

@ -0,0 +1,86 @@
[[percolator]]
=== Percolator type
The `percolator` field type parses a JSON structure into a native query and
stores that query, so that the <<query-dsl-percolator-query,percolator query>>
can use it to match provided documents.
Any field that contains a JSON object can be configured to be a percolator
field. The percolator field type has no settings; just configuring the `percolator`
field type is sufficient to instruct Elasticsearch to treat a field as a
query.
If the following mapping configures the `percolator` field type for the
`query` field:
[source,js]
--------------------------------------------------
{
"properties": {
"query": {
"type": "percolator"
}
}
}
--------------------------------------------------
Then the following JSON snippet can be indexed as a native query:
[source,js]
--------------------------------------------------
{
"query" : {
"match" : {
"field" : "value"
}
}
}
--------------------------------------------------
[IMPORTANT]
=====================================
Fields referred to in a percolator query must *already* exist in the mapping
associated with the index used for percolation. In order to make sure these fields exist,
add or update a mapping via the <<indices-create-index,create index>> or <<indices-put-mapping,put mapping>> APIs.
Fields referred to in a percolator query may exist in any type of the index containing the `percolator` field type.
Also, an index can contain at most one percolator field mapping. Multiple percolator fields will be rejected by the
create index and put mapping APIs.
=====================================
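For example, a minimal sketch of making sure a field exists before registering queries against it (the index name `my-index`, the `doctype` type, and the `message` field are illustrative assumptions):
[source,js]
--------------------------------------------------
curl -XPUT "http://localhost:9200/my-index/_mapping/doctype" -d'
{
  "properties": {
    "message": {
      "type": "text"
    }
  }
}'
--------------------------------------------------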
[float]
==== Dedicated Percolator Index
Percolator queries can be added to any index. Instead of adding percolator queries to the index the data resides in,
these queries can also be added to a dedicated index. The advantage of this is that the dedicated percolator index
can have its own index settings (for example, the number of primary and replica shards). If you choose to have a dedicated
percolator index, you need to make sure that the mappings from the normal index are also available on the percolator index.
Otherwise percolator queries can be parsed incorrectly.
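A sketch of what such a dedicated index could look like, assuming the data index maps a `message` field that the stored queries refer to (all names here are illustrative):
[source,js]
--------------------------------------------------
curl -XPUT "http://localhost:9200/queries-index" -d'
{
  "settings": {
    "number_of_shards": 1,
    "number_of_replicas": 2
  },
  "mappings": {
    "doctype": {
      "properties": {
        "message": { "type": "text" }
      }
    },
    "queries": {
      "properties": {
        "query": { "type": "percolator" }
      }
    }
  }
}'
--------------------------------------------------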
[float]
==== Forcing Unmapped Fields to be Handled as Strings
In certain cases it is unknown what kind of percolator queries will be registered, and if no field mapping exists for the fields
that are referred to by percolator queries, then adding a percolator query fails. This means the mapping needs to be updated
to include the field with the appropriate settings before the percolator query can be added. But sometimes it is sufficient
if all unmapped fields are handled as if these were default string fields. In those cases one can set the
`index.percolator.map_unmapped_fields_as_string` setting to `true` (defaults to `false`); then, if a field referred to in
a percolator query does not exist, it will be handled as a default string field so that adding the percolator query doesn't
fail.
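For instance, a sketch of enabling this setting at index creation time (the index name is an illustrative assumption):
[source,js]
--------------------------------------------------
curl -XPUT "http://localhost:9200/my-queries-index" -d'
{
  "settings": {
    "index.percolator.map_unmapped_fields_as_string": true
  },
  "mappings": {
    "queries": {
      "properties": {
        "query": { "type": "percolator" }
      }
    }
  }
}'
--------------------------------------------------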
[float]
==== Important Notes
Because the percolator query processes one document at a time, it doesn't support queries and filters that run
against child documents, such as `has_child` and `has_parent`.
There are a number of queries that fetch data via a get call during query parsing, for example the `terms` query when
using terms lookup, the `template` query when using indexed scripts, and `geo_shape` when using pre-indexed shapes. When these
queries are indexed by the `percolator` field type, the get call is executed once. So each time the `percolator`
query evaluates these queries, the fetched terms, shapes, etc. will be used as they were at index time.
The `wildcard` and `regexp` queries natively use a lot of memory, and because the percolator keeps the queries in memory,
this can easily take up the available memory in the heap space. If possible, try to use a `prefix` query or ngramming to
achieve the same result (with far less memory being used).
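As an illustration, instead of registering a query such as `{ "wildcard" : { "body" : "bonsai*" } }`, the `prefix` query below matches the same terms at a fraction of the memory cost (the `body` field is an illustrative assumption):
[source,js]
--------------------------------------------------
{
  "query" : {
    "prefix" : {
      "body" : "bonsai"
    }
  }
}
--------------------------------------------------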

View File

@ -15,7 +15,16 @@ percolation.
Percolator and multi percolate APIs have been deprecated and will be removed in the next major release. These APIs have
been replaced by the `percolator` query that can be used in the search and multi search APIs.
==== Percolator mapping
==== Percolator field mapping
The `.percolator` type can no longer be used to index percolator queries.
Instead a <<percolator,percolator field type>> must be configured prior to indexing percolator queries.
Indices with a `.percolator` type created on a version before 5.0.0 can still be used,
but new indices no longer accept the `.percolator` type.
==== Percolate document mapping
The `percolator` query can no longer accept documents that reference fields
that don't already exist in the mapping. Previously, the percolate API allowed this.

View File

@ -1,49 +1,56 @@
[[query-dsl-percolator-query]]
=== Percolator Query
Traditionally you design documents based on your data, store them into an index, and then define queries via the search API
in order to retrieve these documents. The percolator works in the opposite direction. First you store queries into an
index and then you use the `percolator` query to search for the queries which match a specified document (or documents).
The reason that queries can be stored comes from the fact that in Elasticsearch both documents and queries are defined in
JSON. This allows you to embed queries into documents via the index API. Elasticsearch can extract the query from a
document and make it available for search via the `percolator` query. Since documents are also defined as JSON,
you can define a document in the `percolator` query.
[IMPORTANT]
=====================================
Fields referred to in a percolator query must *already* exist in the mapping
associated with the index used for percolation. In order to make sure these fields exist,
add or update a mapping via the <<indices-create-index,create index>> or <<indices-put-mapping,put mapping>> APIs.
=====================================
The `percolator` query can be used to match queries
stored in an index. The `percolator` query itself
contains the document that will be used as a query
to match against the stored queries.
[float]
=== Sample Usage
Create an index with a mapping for the field `message`:
Create an index with two mappings:
[source,js]
--------------------------------------------------
curl -XPUT 'localhost:9200/my-index' -d '{
curl -XPUT "http://localhost:9200/my-index" -d'
{
"mappings": {
"my-type": {
"doctype": {
"properties": {
"message": {
"type": "string"
}
}
},
"queries": {
"properties": {
"query": {
"type": "percolator"
}
}
}
}
}'
--------------------------------------------------
The `doctype` mapping is the mapping used to preprocess
the document defined in the `percolator` query before it
gets indexed into a temporary index.
The `queries` mapping is the mapping used for indexing
the query documents. The `query` field will hold a JSON
object that represents an actual Elasticsearch query. The
`query` field has been configured to use the
<<percolator,percolator field type>>. This field type understands
the query DSL and stores the query in such a way that it
can be used later on to match documents defined in the `percolator` query.
Register a query in the percolator:
[source,js]
--------------------------------------------------
curl -XPUT 'localhost:9200/my-index/.percolator/1' -d '{
curl -XPUT 'localhost:9200/my-index/queries/1' -d '{
"query" : {
"match" : {
"message" : "bonsai tree"
@ -59,7 +66,8 @@ Match a document to the registered percolator queries:
curl -XGET 'localhost:9200/my-index/_search' -d '{
"query" : {
"percolator" : {
"document_type" : "my-type",
"field" : "query",
"document_type" : "doctype",
"document" : {
"message" : "A new bonsai tree in the office"
}
@ -82,13 +90,13 @@ The above request will yield the following response:
},
"hits": {
"total": 1,
"max_score": 0,
"max_score": 0.5716521,
"hits": [
{ <1>
"_index": "my-index",
"_type": ".percolator",
"_type": "queries",
"_id": "1",
"_score": 0,
"_score": 0.5716521,
"_source": {
"query": {
"match": {
@ -104,73 +112,14 @@ The above request will yield the following response:
<1> The percolate query with id `1` matches our document.
[float]
=== Indexing Percolator Queries
Percolate queries are stored as documents in a specific format and in an arbitrary index under a reserved type with the
name `.percolator`. The query itself is placed as is in a JSON object under the top level field `query`.
[source,js]
--------------------------------------------------
{
"query" : {
"match" : {
"field" : "value"
}
}
}
--------------------------------------------------
Since this is just an ordinary document, any field can be added to this document. This can be useful later on to only
percolate documents by specific queries.
[source,js]
--------------------------------------------------
{
"query" : {
"match" : {
"field" : "value"
}
},
"priority" : "high"
}
--------------------------------------------------
Just as with any other type, the `.percolator` type has a mapping, which you can configure via the mappings APIs.
The default percolate mapping doesn't index the query field, only stores it.
Because `.percolate` is a type it also has a mapping. By default the following mapping is active:
[source,js]
--------------------------------------------------
{
".percolator" : {
"properties" : {
"query" : {
"type" : "percolator"
}
}
}
}
--------------------------------------------------
If needed, this mapping can be modified with the update mapping API.
In order to un-register a percolate query the delete API can be used. So if the previous added query needs to be deleted
the following delete requests needs to be executed:
[source,js]
--------------------------------------------------
curl -XDELETE localhost:9200/my-index/.percolator/1
--------------------------------------------------
[float]
==== Parameters
The following parameters are required when percolating a document:
[horizontal]
`document_type`:: The type / mapping of the document being percolated. This is parameter is always required.
`field`:: The field of type `percolator` and that holds the indexed queries. This is a required parameter.
`document_type`:: The type / mapping of the document being percolated. This is a required parameter.
`document`:: The source of the document being percolated.
Instead of specifying the source of the document being percolated, the source can also be retrieved from an already
@ -186,15 +135,6 @@ In that case the `document` parameter can be substituted with the following para
`preference`:: Optionally, preference to be used to fetch document to percolate.
`version`:: Optionally, the expected version of the document to be fetched.
[float]
==== Dedicated Percolator Index
Percolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in,
these queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index
can have its own index settings (For example the number of primary and replica shards). If you choose to have a dedicated
percolate index, you need to make sure that the mappings from the normal index are also available on the percolate index.
Otherwise percolate queries can be parsed incorrectly.
[float]
==== Percolating an Existing Document
@ -243,7 +183,8 @@ curl -XGET "http://localhost:9200/my-index/_search" -d'
{
"query" : {
"percolator" : {
"document_type" : "my-type",
"field": "query",
"document_type" : "doctype",
"index" : "my-index",
"type" : "message",
"id" : "1",
@ -275,7 +216,7 @@ Add a percolator query:
[source,js]
--------------------------------------------------
curl -XPUT "http://localhost:9200/my-index/.percolator/1" -d'
curl -XPUT "http://localhost:9200/my-index/queries/1" -d'
{
"query" : {
"match" : {
@ -289,7 +230,7 @@ Add another percolator query:
[source,js]
--------------------------------------------------
curl -XPUT "http://localhost:9200/my-index/.percolator/2" -d'
curl -XPUT "http://localhost:9200/my-index/queries/2" -d'
{
"query" : {
"match" : {
@ -299,7 +240,7 @@ curl -XPUT "http://localhost:9200/my-index/.percolator/2" -d'
}'
--------------------------------------------------
Execute a search request with `percolator` and highlighting enabled:
Execute a search request with the `percolator` query and highlighting enabled:
[source,js]
--------------------------------------------------
@ -307,7 +248,8 @@ curl -XGET "http://localhost:9200/my-index/_search" -d'
{
"query" : {
"percolator" : {
"document_type" : "my-type",
"field": "query",
"document_type" : "doctype",
"document" : {
"message" : "The quick brown fox jumps over the lazy dog"
}
@ -326,7 +268,7 @@ This will yield the following response.
[source,js]
--------------------------------------------------
{
"took": 14,
"took": 83,
"timed_out": false,
"_shards": {
"total": 5,
@ -335,13 +277,13 @@ This will yield the following response.
},
"hits": {
"total": 2,
"max_score": 0,
"max_score": 0.5446649,
"hits": [
{
"_index": "my-index",
"_type": ".percolator",
"_type": "queries",
"_id": "2",
"_score": 0,
"_score": 0.5446649,
"_source": {
"query": {
"match": {
@ -351,15 +293,15 @@ This will yield the following response.
},
"highlight": {
"message": [
"The quick brown fox jumps over the <em>lazy</em> <em>dog</em>" <1>
"The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"
]
}
},
{
"_index": "my-index",
"_type": ".percolator",
"_type": "queries",
"_id": "1",
"_score": 0,
"_score": 0.5446649,
"_source": {
"query": {
"match": {
@ -369,7 +311,7 @@ This will yield the following response.
},
"highlight": {
"message": [
"The quick <em>brown</em> <em>fox</em> jumps over the lazy dog" <1>
"The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"
]
}
}
@ -384,41 +326,14 @@ This will yield the following response.
[float]
==== How it Works Under the Hood
When indexing a document that contains a query in an index and the `.percolator` type, the query part of the documents gets
parsed into a Lucene query and is kept in memory until that percolator document is removed or the index containing the
`.percolator` type gets removed. So, all the active percolator queries are kept in memory.
When indexing a document into an index that has the <<percolator,percolator field type>> mapping configured, the query
part of the document gets parsed into a Lucene query and is kept in memory until that percolator document is removed.
So, all the active percolator queries are kept in memory.
At search time, the document specified in the request gets parsed into a Lucene document and is stored in a in-memory
Lucene index. This in-memory index can just hold this one document and it is optimized for that. Then all the queries
that are registered to the index that the searh request is targeted for, are going to be executed on this single document
temporary Lucene index. This in-memory index can just hold this one document and it is optimized for that. Then all the queries
that are registered to the index that the search request is targeted for, are going to be executed on this single document
in-memory index. This happens on each shard the search request needs to execute.
By using `routing` or additional queries the amount of percolator queries that need to be executed can be reduced and thus
the time the search API needs to run can be decreased.
[float]
==== Important Notes
Because the percolator query is processing one document at a time, it doesn't support queries and filters that run
against child documents such as `has_child` and `has_parent`.
The percolator doesn't work with queries like `template` and `geo_shape` queries when these queries fetch documents
to substitute parts of the query. The reason is that the percolator stores the query terms during indexing in order to
speedup percolating in certain cases and this doesn't work if part of the query is defined in another document.
There is no way to know for the percolator to know if an external document has changed and even if this was the case the
percolator query has to be reindexed.
The `wildcard` and `regexp` query natively use a lot of memory and because the percolator keeps the queries into memory
this can easily take up the available memory in the heap space. If possible try to use a `prefix` query or ngramming to
achieve the same result (with way less memory being used).
[float]
==== Forcing Unmapped Fields to be Handled as Strings
In certain cases it is unknown what kind of percolator queries do get registered, and if no field mapping exists for fields
that are referred by percolator queries then adding a percolator query fails. This means the mapping needs to be updated
to have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient
if all unmapped fields are handled as if these were default string fields. In those cases one can configure the
`index.percolator.map_unmapped_fields_as_string` setting to `true` (default to `false`) and then if a field referred in
a percolator query does not exist, it will be handled as a default string field so that adding the percolator query doesn't
fail.
the time the search API needs to run can be decreased.
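For example, a sketch of narrowing the candidate queries with an additional filter on a metadata field stored alongside the query documents, so that only a subset of the registered queries is evaluated (the `priority` field is an illustrative assumption):
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/my-index/_search" -d'
{
  "query" : {
    "bool" : {
      "must" : {
        "percolator" : {
          "field" : "query",
          "document_type" : "doctype",
          "document" : {
            "message" : "A new bonsai tree in the office"
          }
        }
      },
      "filter" : {
        "term" : { "priority" : "high" }
      }
    }
  }
}'
--------------------------------------------------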

View File

@ -1,5 +1,16 @@
---
"Basic multi-percolate":
- do:
indices.create:
index: percolator_index
body:
mappings:
queries:
properties:
query:
type: percolator
- do:
index:
index: percolator_index
@ -10,7 +21,7 @@
- do:
index:
index: percolator_index
type: .percolator
type: queries
id: test_percolator
body:
query:

View File

@ -4,11 +4,17 @@
- do:
indices.create:
index: test_index
body:
mappings:
queries:
properties:
query:
type: percolator
- do:
index:
index: test_index
type: .percolator
type: queries
id: test_percolator
body:
query:

View File

@ -4,11 +4,17 @@
- do:
indices.create:
index: percolator_index
body:
mappings:
queries:
properties:
query:
type: percolator
- do:
index:
index: percolator_index
type: .percolator
type: queries
id: test_percolator
body:
query:

View File

@ -4,6 +4,12 @@
- do:
indices.create:
index: test_index
body:
mappings:
queries:
properties:
query:
type: percolator
- do:
indices.refresh: {}

View File

@ -10,11 +10,15 @@
properties:
foo:
type: text
queries:
properties:
query:
type: percolator
- do:
index:
index: test_index
type: .percolator
type: queries
id: test_percolator
body:
query:

View File

@ -14,6 +14,10 @@ setup:
properties:
name:
type: text
queries:
properties:
query:
type: percolator
- do:
@ -21,7 +25,7 @@ setup:
- do:
index:
index: nestedindex
type: ".percolator"
type: "queries"
id: query
body: { "query": { "nested": { "path": "employee", "score_mode": "avg", "query": { "match": { "employee.name": { "query": "virginia potts", "operator": "and"} } } } } }
- do: