Parent / Child Support, closes #553.

This commit is contained in:
kimchy 2010-12-08 00:16:05 +02:00
parent 1a8017d17e
commit 54437c1bd3
67 changed files with 3360 additions and 66 deletions

View File

@@ -0,0 +1,190 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.child;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.action.bulk.BulkRequestBuilder;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import java.io.IOException;
import static org.elasticsearch.client.Requests.*;
import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
import static org.elasticsearch.common.settings.ImmutableSettings.*;
import static org.elasticsearch.common.xcontent.XContentFactory.*;
import static org.elasticsearch.index.query.xcontent.QueryBuilders.*;
import static org.elasticsearch.node.NodeBuilder.*;
/**
* @author kimchy (shay.banon)
*/
public class ChildSearchBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.engine.robin.refreshInterval", "-1")
.put("gateway.type", "local")
.put(SETTING_NUMBER_OF_SHARDS, 2)
.put(SETTING_NUMBER_OF_REPLICAS, 1)
.build();
Node node1 = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node1")).node();
Node node2 = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node2")).node();
Node clientNode = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
Client client = clientNode.client();
long COUNT = SizeValue.parseSizeValue("1m").singles();
int CHILD_COUNT = 5;
int BATCH = 100;
int QUERY_COUNT = 500;
Thread.sleep(10000);
try {
client.admin().indices().create(createIndexRequest("test")).actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
Thread.sleep(5000);
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
request.add(Requests.indexRequest("test").type("parent").id(Integer.toString(counter))
.source(parentSource(Integer.toString(counter), "test" + counter)));
for (int k = 0; k < CHILD_COUNT; k++) {
request.add(Requests.indexRequest("test").type("child").id(Integer.toString(counter) + "_" + k)
.parent(Integer.toString(counter))
.source(childSource(Integer.toString(counter), "tag" + k)));
}
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) * (1 + CHILD_COUNT) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT * (1 + CHILD_COUNT))) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.timedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().count());
System.out.println("--> Running just child query");
// run just the child query, warm up first
for (int j = 0; j < 100; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
if (j == 0) {
System.out.println("--> Warmup took: " + searchResponse.took());
}
if (searchResponse.hits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
long totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
if (searchResponse.hits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.tookInMillis();
}
System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_child query");
// run parent child constant query
for (int j = 0; j < 100; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(hasChildQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
if (searchResponse.hits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(hasChildQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
if (searchResponse.hits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.tookInMillis();
}
System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running top_children query");
// run parent child score query
for (int j = 0; j < 100; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
// we expect a mismatch on hits here (top_children counts are approximate)
// if (searchResponse.hits().totalHits() != COUNT) {
// System.err.println("mismatch on hits");
// }
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch().setQuery(topChildrenQuery("child", termQuery("tag", "tag1"))).execute().actionGet();
// we expect a mismatch on hits here (top_children counts are approximate)
// if (searchResponse.hits().totalHits() != COUNT) {
// System.err.println("mismatch on hits");
// }
totalQueryTime += searchResponse.tookInMillis();
}
System.out.println("--> top_children Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
clientNode.close();
node1.close();
node2.close();
}
private static XContentBuilder parentSource(String id, String nameValue) throws IOException {
return jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
}
private static XContentBuilder childSource(String id, String tag) throws IOException {
return jsonBuilder().startObject().field("id", id).field("tag", tag).endObject();
}
}
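For context, a minimal end-to-end sketch of the feature this benchmark exercises, using only APIs that appear in this commit (index name, ids and tag values are illustrative):

// assumes the "test" index exists with the _parent mapping shown above
client.prepareIndex("test", "parent", "p1")
    .setSource(jsonBuilder().startObject().field("name", "test1").endObject())
    .execute().actionGet();
client.prepareIndex("test", "child", "c1")
    .setParent("p1") // the parent id also becomes the routing value, keeping the child on the parent's shard
    .setSource(jsonBuilder().startObject().field("tag", "tag1").endObject())
    .execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
// matches parent docs that have at least one child matching the inner query
SearchResponse response = client.prepareSearch("test")
    .setQuery(hasChildQuery("child", termQuery("tag", "tag1")))
    .execute().actionGet();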

View File

@@ -85,7 +85,7 @@ public class SingleThreadBulkStress {
System.err.println("failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("Indexed " + (i * 100) + " took " + stopWatch.stop().lastTaskTime());
System.out.println("Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}

View File

@@ -110,6 +110,7 @@ public class BulkRequest implements ActionRequest {
String type = null;
String id = null;
String routing = null;
String parent = null;
String opType = null;
String currentFieldName = null;
@@ -125,6 +126,8 @@ public class BulkRequest implements ActionRequest {
id = parser.text();
} else if ("_routing".equals(currentFieldName)) {
routing = parser.text();
} else if ("_parent".equals(currentFieldName)) {
parent = parser.text();
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
opType = parser.text();
}
@@ -138,17 +141,18 @@ public class BulkRequest implements ActionRequest {
if (nextMarker == -1) {
break;
}
// order is important, we set parent after routing, so routing will be set to parent if not set explicitly
if ("index".equals(action)) {
if (opType == null) {
- add(new IndexRequest(index, type, id).routing(routing)
+ add(new IndexRequest(index, type, id).routing(routing).parent(parent)
.source(data, from, nextMarker - from, contentUnsafe));
} else {
- add(new IndexRequest(index, type, id).routing(routing)
+ add(new IndexRequest(index, type, id).routing(routing).parent(parent)
.create("create".equals(opType))
.source(data, from, nextMarker - from, contentUnsafe));
}
} else if ("create".equals(action)) {
- add(new IndexRequest(index, type, id).routing(routing)
+ add(new IndexRequest(index, type, id).routing(routing).parent(parent)
.create(true)
.source(data, from, nextMarker - from, contentUnsafe));
}
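For reference, a sketch of the bulk payload this parser now accepts — the _parent value rides in the action line, one JSON object per line (values illustrative):

{ "index" : { "_index" : "test", "_type" : "child", "_id" : "c1", "_parent" : "p1" } }
{ "tag" : "tag1" }

Since parent is applied after routing, the child document is routed by "p1" unless the action line also carries an explicit "_routing".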

View File

@@ -116,7 +116,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
}
}
- SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()).routing(indexRequest.routing());
+ SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
+ .routing(indexRequest.routing()).parent(indexRequest.parent());
if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
ops[i] = indexShard.prepareIndex(sourceToParse);
} else {
@@ -198,7 +199,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
if (item.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) item.request();
try {
- SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()).routing(indexRequest.routing());
+ SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
+ .routing(indexRequest.routing()).parent(indexRequest.parent());
if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
ops[i] = indexShard.prepareIndex(sourceToParse);
} else {

View File

@@ -113,6 +113,7 @@ public class IndexRequest extends ShardReplicationOperationRequest {
private String type;
private String id;
@Nullable private String routing;
@Nullable private String parent;
private byte[] source;
private int sourceOffset;
@@ -255,6 +256,22 @@ public class IndexRequest extends ShardReplicationOperationRequest {
return this.routing;
}
/**
* Sets the parent id of this document. If routing is not set, the parent id is
* automatically used as the routing value as well.
*/
public IndexRequest parent(String parent) {
this.parent = parent;
if (routing == null) {
routing = parent;
}
return this;
}
public String parent() {
return this.parent;
}
/**
* The source of the document to index, recopied to a new array if it has an offset or unsafe.
*/
@@ -532,6 +549,9 @@ public class IndexRequest extends ShardReplicationOperationRequest {
if (in.readBoolean()) {
routing = in.readUTF();
}
if (in.readBoolean()) {
parent = in.readUTF();
}
sourceUnsafe = false;
sourceOffset = 0;
@@ -558,6 +578,12 @@ public class IndexRequest extends ShardReplicationOperationRequest {
out.writeBoolean(true);
out.writeUTF(routing);
}
if (parent == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeUTF(parent);
}
out.writeVInt(sourceLength);
out.writeBytes(source, sourceOffset, sourceLength);
out.writeByte(opType.id());
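A short sketch of the setter semantics added above (values hypothetical):

IndexRequest request = new IndexRequest("test", "child", "c1").parent("p1");
assert "p1".equals(request.routing());    // parent doubles as routing when none is set
IndexRequest routed = new IndexRequest("test", "child", "c2").routing("custom").parent("p1");
assert "custom".equals(routed.routing()); // explicit routing wins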

View File

@@ -160,7 +160,8 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
}
IndexShard indexShard = indexShard(shardRequest);
- SourceToParse sourceToParse = SourceToParse.source(request.source()).type(request.type()).id(request.id()).routing(request.routing());
+ SourceToParse sourceToParse = SourceToParse.source(request.source()).type(request.type()).id(request.id())
+ .routing(request.routing()).parent(request.parent());
ParsedDocument doc;
if (request.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse);
@@ -180,7 +181,8 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi
@Override protected void shardOperationOnReplica(ShardOperationRequest shardRequest) {
IndexShard indexShard = indexShard(shardRequest);
IndexRequest request = shardRequest.request;
- SourceToParse sourceToParse = SourceToParse.source(request.source()).type(request.type()).id(request.id()).routing(request.routing());
+ SourceToParse sourceToParse = SourceToParse.source(request.source()).type(request.type()).id(request.id())
+ .routing(request.routing()).parent(request.parent());
if (request.opType() == IndexRequest.OpType.INDEX) {
Engine.Index index = indexShard.prepareIndex(sourceToParse);
index.refresh(request.refresh());

View File

@@ -78,6 +78,15 @@ public class IndexRequestBuilder extends BaseRequestBuilder<IndexRequest, IndexR
return this;
}
/**
* Sets the parent id of this document. If routing is not set, the parent id is
* automatically used as the routing value as well.
*/
public IndexRequestBuilder setParent(String parent) {
request.parent(parent);
return this;
}
/**
* Index the Map as a JSON.
*

View File

@@ -0,0 +1,60 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.util.Arrays;
/**
* @author kimchy (shay.banon)
*/
public class BytesWrap {
private final byte[] bytes;
// we pre-compute the hashCode for better performance (especially in IdCache)
private final int hashCode;
public BytesWrap(byte[] bytes) {
this.bytes = bytes;
this.hashCode = Arrays.hashCode(bytes);
}
public BytesWrap(String str) {
this(Unicode.fromStringAsBytes(str));
}
public byte[] bytes() {
return this.bytes;
}
public String utf8ToString() {
return Unicode.fromBytes(bytes);
}
@Override public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
BytesWrap bytesWrap = (BytesWrap) o;
return Arrays.equals(bytes, bytesWrap.bytes);
}
@Override public int hashCode() {
return hashCode;
}
}
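The cached hash makes BytesWrap suitable as a high-churn hash key; a small sketch (values illustrative):

Map<BytesWrap, Integer> docs = new HashMap<BytesWrap, Integer>();
docs.put(new BytesWrap("p1"), 42);
// hashCode() returns the precomputed value; equals() compares the raw bytes
assert docs.get(new BytesWrap("p1")) == 42;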

View File

@@ -0,0 +1,51 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class EmptyScorer extends Scorer {
public EmptyScorer(Similarity similarity) {
super(similarity);
}
@Override public float score() throws IOException {
return 0;
}
@Override public int docID() {
return NO_MORE_DOCS;
}
@Override public int nextDoc() throws IOException {
return NO_MORE_DOCS;
}
@Override public int advance(int target) throws IOException {
return NO_MORE_DOCS;
}
}
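A scorer that is exhausted from the start; a quick sketch of the contract (assuming the Lucene 3.x Similarity and DocIdSetIterator APIs):

Scorer scorer = new EmptyScorer(Similarity.getDefault());
assert scorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;  // no documents to iterate
assert scorer.advance(10) == DocIdSetIterator.NO_MORE_DOCS;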

View File

@@ -54,6 +54,14 @@ public class ExtTObjectIntHasMap<T> extends TObjectIntHashMap<T> {
return this;
}
/**
* Returns an already existing key, or <tt>null</tt> if it does not exist.
*/
public T key(T key) {
int index = index(key);
return index < 0 ? null : (T) _set[index];
}
@Override public final int get(T key) {
int index = index(key);
return index < 0 ? defaultReturnValue : _values[index];
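key(...) enables interning: a caller holding an equal-but-distinct instance can swap it for the one already stored and let the duplicate be collected. A sketch (hypothetical values):

ExtTObjectIntHasMap<BytesWrap> idToDoc = new ExtTObjectIntHasMap<BytesWrap>().defaultReturnValue(-1);
BytesWrap first = new BytesWrap("p1");
idToDoc.put(first, 3);
BytesWrap duplicate = new BytesWrap("p1"); // equal content, distinct instance
assert idToDoc.key(duplicate) == first;    // same stored instance; the duplicate can be dropped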

View File

@@ -25,13 +25,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.field.data.FieldDataCache;
import org.elasticsearch.index.cache.field.data.none.NoneFieldDataCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.settings.IndexSettings;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.*;
/**
* @author kimchy (shay.banon)
*/
@@ -41,14 +38,14 @@ public class IndexCache extends AbstractIndexComponent {
private final FieldDataCache fieldDataCache;
public IndexCache(Index index) {
this(index, EMPTY_SETTINGS, new NoneFilterCache(index, EMPTY_SETTINGS), new NoneFieldDataCache(index, EMPTY_SETTINGS));
}
private final IdCache idCache;
- @Inject public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, FieldDataCache fieldDataCache) {
+ @Inject public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, FieldDataCache fieldDataCache,
+ IdCache idCache) {
super(index, indexSettings);
this.filterCache = filterCache;
this.fieldDataCache = fieldDataCache;
this.idCache = idCache;
}
public FilterCache filter() {
@@ -59,18 +56,25 @@ public class IndexCache extends AbstractIndexComponent {
return fieldDataCache;
}
public IdCache idCache() {
return this.idCache;
}
public void clear(IndexReader reader) {
filterCache.clear(reader);
fieldDataCache.clear(reader);
idCache.clear(reader);
}
public void clear() {
filterCache.clear();
fieldDataCache.clear();
idCache.clear();
}
public void clearUnreferenced() {
filterCache.clearUnreferenced();
fieldDataCache.clearUnreferenced();
idCache.clearUnreferenced();
}
}

View File

@@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.field.data.FieldDataCacheModule;
import org.elasticsearch.index.cache.filter.FilterCacheModule;
import org.elasticsearch.index.cache.id.IdCacheModule;
/**
* @author kimchy (shay.banon)
@@ -38,6 +39,7 @@ public class IndexCacheModule extends AbstractModule {
@Override protected void configure() {
new FilterCacheModule(settings).configure(binder());
new FieldDataCacheModule(settings).configure(binder());
new IdCacheModule(settings).configure(binder());
bind(IndexCache.class).asEagerSingleton();
}

View File

@@ -0,0 +1,41 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.apache.lucene.index.IndexReader;
/**
* @author kimchy (shay.banon)
*/
public interface IdCache extends Iterable<IdReaderCache> {
void clear();
void clear(IndexReader reader);
/**
* Clears unreferenced readers.
*/
void clearUnreferenced();
void refresh(IndexReader[] readers) throws Exception;
IdReaderCache reader(IndexReader reader);
}
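The intended call pattern, as far as the interface suggests (a sketch; searcher, subReader and docId are assumed):

// ensure per-reader caches exist for the current searcher's readers, then read them
idCache.refresh(searcher.subReaders());
IdReaderCache readerCache = idCache.reader(subReader);
BytesWrap parentId = readerCache.parentIdByDoc("parent", docId); // parent id of the given doc, keyed by parent type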

View File

@@ -0,0 +1,47 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
/**
* @author kimchy (shay.banon)
*/
public class IdCacheModule extends AbstractModule {
public static final class IdCacheSettings {
public static final String ID_CACHE_TYPE = "index.cache.id.type";
}
private final Settings settings;
public IdCacheModule(Settings settings) {
this.settings = settings;
}
@Override protected void configure() {
bind(IdCache.class)
.to(settings.getAsClass(IdCacheSettings.ID_CACHE_TYPE, SimpleIdCache.class, "org.elasticsearch.index.cache.id.", "IdCache"))
.in(Scopes.SINGLETON);
}
}

View File

@@ -0,0 +1,36 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.BytesWrap;
/**
* @author kimchy (shay.banon)
*/
public interface IdReaderCache {
Object readerCacheKey();
IdReaderTypeCache type(String type);
BytesWrap parentIdByDoc(String type, int docId);
int docById(String type, BytesWrap id);
}

View File

@@ -0,0 +1,32 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.BytesWrap;
/**
* @author kimchy (shay.banon)
*/
public interface IdReaderTypeCache {
BytesWrap parentIdByDoc(int docId);
int docById(BytesWrap id);
}

View File

@@ -0,0 +1,248 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id.simple;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.common.BytesWrap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.MapMaker;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.trove.ExtTObjectIntHasMap;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.cache.id.IdReaderCache;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
/**
* @author kimchy (shay.banon)
*/
public class SimpleIdCache implements IdCache {
private final ConcurrentMap<Object, SimpleIdReaderCache> idReaders;
@Inject public SimpleIdCache() {
idReaders = new MapMaker().weakKeys().makeMap();
}
@Override public void clear() {
idReaders.clear();
}
@Override public void clear(IndexReader reader) {
idReaders.remove(reader.getFieldCacheKey());
}
@Override public void clearUnreferenced() {
// nothing to do here...
}
@Override public IdReaderCache reader(IndexReader reader) {
return idReaders.get(reader.getFieldCacheKey());
}
@SuppressWarnings({"unchecked"}) @Override public Iterator<IdReaderCache> iterator() {
return (Iterator<IdReaderCache>) idReaders.values();
}
@SuppressWarnings({"StringEquality"})
@Override public void refresh(IndexReader[] readers) throws Exception {
// do a quick check for the common case, that all are there
if (refreshNeeded(readers)) {
synchronized (idReaders) {
if (!refreshNeeded(readers)) {
return;
}
// do the refresh
Map<Object, Map<String, TypeBuilder>> builders = new HashMap<Object, Map<String, TypeBuilder>>();
// first, go over and load all the id->doc map for all types
for (IndexReader reader : readers) {
if (idReaders.containsKey(reader.getFieldCacheKey())) {
// no need, continue
continue;
}
HashMap<String, TypeBuilder> readerBuilder = new HashMap<String, TypeBuilder>();
builders.put(reader.getFieldCacheKey(), readerBuilder);
String field = StringHelper.intern(UidFieldMapper.NAME);
TermDocs termDocs = reader.termDocs();
TermEnum termEnum = reader.terms(new Term(field));
try {
do {
Term term = termEnum.term();
if (term == null || term.field() != field) break;
// TODO we can optimize this, since type is the prefix, and we get terms ordered
// so, only need to move to the next type once it's different
Uid uid = Uid.createUid(term.text());
TypeBuilder typeBuilder = readerBuilder.get(uid.type());
if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
}
BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
termDocs.seek(termEnum);
while (termDocs.next()) {
// when traversing, make sure to ignore deleted docs, so the key->docId will be correct
if (!reader.isDeleted(termDocs.doc())) {
typeBuilder.idToDoc.put(idAsBytes, termDocs.doc());
}
}
} while (termEnum.next());
} finally {
termDocs.close();
termEnum.close();
}
}
// now, go and load the docId->parentId map
for (IndexReader reader : readers) {
if (idReaders.containsKey(reader.getFieldCacheKey())) {
// no need, continue
continue;
}
Map<String, TypeBuilder> readerBuilder = builders.get(reader.getFieldCacheKey());
int t = 1; // current term ordinal (0 indicates the null value)
String field = StringHelper.intern(ParentFieldMapper.NAME);
TermDocs termDocs = reader.termDocs();
TermEnum termEnum = reader.terms(new Term(field));
try {
do {
Term term = termEnum.term();
if (term == null || term.field() != field) break;
// TODO we can optimize this, since type is the prefix, and we get terms ordered
// so, only need to move to the next type once it's different
Uid uid = Uid.createUid(term.text());
TypeBuilder typeBuilder = readerBuilder.get(uid.type());
if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
}
BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
boolean added = false; // optimize for when all the docs are deleted for this id
termDocs.seek(termEnum);
while (termDocs.next()) {
// ignore deleted docs while we are at it
if (!reader.isDeleted(termDocs.doc())) {
if (!added) {
typeBuilder.parentIdsValues.add(idAsBytes);
added = true;
}
typeBuilder.parentIdsOrdinals[termDocs.doc()] = t;
}
}
if (added) {
t++;
}
} while (termEnum.next());
} finally {
termDocs.close();
termEnum.close();
}
}
// now, build it back
for (Map.Entry<Object, Map<String, TypeBuilder>> entry : builders.entrySet()) {
MapBuilder<String, SimpleIdReaderTypeCache> types = MapBuilder.newMapBuilder();
for (Map.Entry<String, TypeBuilder> typeBuilderEntry : entry.getValue().entrySet()) {
types.put(typeBuilderEntry.getKey(), new SimpleIdReaderTypeCache(typeBuilderEntry.getKey(),
typeBuilderEntry.getValue().idToDoc,
typeBuilderEntry.getValue().parentIdsValues.toArray(new BytesWrap[typeBuilderEntry.getValue().parentIdsValues.size()]),
typeBuilderEntry.getValue().parentIdsOrdinals));
}
SimpleIdReaderCache readerCache = new SimpleIdReaderCache(entry.getKey(), types.immutableMap());
idReaders.put(readerCache.readerCacheKey(), readerCache);
}
}
}
}
private BytesWrap checkIfCanReuse(Map<Object, Map<String, TypeBuilder>> builders, BytesWrap idAsBytes) {
BytesWrap finalIdAsBytes;
// go over and see if we can reuse this id
for (SimpleIdReaderCache idReaderCache : idReaders.values()) {
finalIdAsBytes = idReaderCache.canReuse(idAsBytes);
if (finalIdAsBytes != null) {
return finalIdAsBytes;
}
}
for (Map<String, TypeBuilder> map : builders.values()) {
for (TypeBuilder typeBuilder : map.values()) {
finalIdAsBytes = typeBuilder.canReuse(idAsBytes);
if (finalIdAsBytes != null) {
return finalIdAsBytes;
}
}
}
return idAsBytes;
}
private boolean refreshNeeded(IndexReader[] readers) {
boolean refreshNeeded = false;
for (IndexReader reader : readers) {
if (!idReaders.containsKey(reader.getFieldCacheKey())) {
refreshNeeded = true;
break;
}
}
return refreshNeeded;
}
static class TypeBuilder {
final ExtTObjectIntHasMap<BytesWrap> idToDoc = new ExtTObjectIntHasMap<BytesWrap>().defaultReturnValue(-1);
final ArrayList<BytesWrap> parentIdsValues = new ArrayList<BytesWrap>();
final int[] parentIdsOrdinals;
TypeBuilder(IndexReader reader) {
parentIdsOrdinals = new int[reader.maxDoc()];
// the first one indicates null value
parentIdsValues.add(null);
}
/**
* Returns an already stored instance if one exists; otherwise, returns <tt>null</tt>.
*/
public BytesWrap canReuse(BytesWrap id) {
return idToDoc.key(id);
}
}
}
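Concretely, for a segment holding parent p1 (doc 0) with children c1 (doc 1) and c2 (doc 2), each carrying _parent = p1 (hypothetical data), the two passes above build roughly:

// pass 1, from _uid terms: per-type id -> docId
//   "parent": { p1 -> 0 }
//   "child":  { c1 -> 1, c2 -> 2 }
// pass 2, from _parent terms (uids of the parent type, so stored under the
// parent type's cache, which is what ChildCollector later reads):
//   parentIdsValues   = [null, p1]   // slot 0 is reserved for "no parent"
//   parentIdsOrdinals = [0, 1, 1]    // docs 1 and 2 both point at p1; doc 0 has no parent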

View File

@@ -0,0 +1,77 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id.simple;
import org.elasticsearch.common.BytesWrap;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.index.cache.id.IdReaderCache;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
/**
* @author kimchy (shay.banon)
*/
public class SimpleIdReaderCache implements IdReaderCache {
private final Object readerCacheKey;
private final ImmutableMap<String, SimpleIdReaderTypeCache> types;
public SimpleIdReaderCache(Object readerCacheKey, ImmutableMap<String, SimpleIdReaderTypeCache> types) {
this.readerCacheKey = readerCacheKey;
this.types = types;
}
@Override public Object readerCacheKey() {
return this.readerCacheKey;
}
@Override public IdReaderTypeCache type(String type) {
return types.get(type);
}
@Override public BytesWrap parentIdByDoc(String type, int docId) {
SimpleIdReaderTypeCache typeCache = types.get(type);
if (typeCache != null) {
return typeCache.parentIdByDoc(docId);
}
return null;
}
@Override public int docById(String type, BytesWrap id) {
SimpleIdReaderTypeCache typeCache = types.get(type);
if (typeCache != null) {
return typeCache.docById(id);
}
return -1;
}
/**
* Returns an already stored instance if one exists; otherwise, returns <tt>null</tt>.
*/
public BytesWrap canReuse(BytesWrap id) {
for (SimpleIdReaderTypeCache typeCache : types.values()) {
BytesWrap wrap = typeCache.canReuse(id);
if (wrap != null) {
return wrap;
}
}
return null;
}
}

View File

@@ -0,0 +1,66 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id.simple;
import org.elasticsearch.common.BytesWrap;
import org.elasticsearch.common.trove.ExtTObjectIntHasMap;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
/**
* @author kimchy (shay.banon)
*/
public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
private final String type;
private final ExtTObjectIntHasMap<BytesWrap> idToDoc;
private final BytesWrap[] parentIdsValues;
private final int[] parentIdsOrdinals;
public SimpleIdReaderTypeCache(String type, ExtTObjectIntHasMap<BytesWrap> idToDoc,
BytesWrap[] parentIdsValues, int[] parentIdsOrdinals) {
this.type = type;
this.idToDoc = idToDoc.defaultReturnValue(-1);
this.idToDoc.trimToSize();
this.parentIdsValues = parentIdsValues;
this.parentIdsOrdinals = parentIdsOrdinals;
}
public String type() {
return this.type;
}
public BytesWrap parentIdByDoc(int docId) {
return parentIdsValues[parentIdsOrdinals[docId]];
}
public int docById(BytesWrap id) {
return idToDoc.get(id);
}
/**
* Returns an already stored instance if one exists; otherwise, returns <tt>null</tt>.
*/
public BytesWrap canReuse(BytesWrap id) {
return idToDoc.key(id);
}
}

View File

@@ -300,6 +300,10 @@ public interface Engine extends IndexShardComponent, CloseableComponent {
return this.doc.routing();
}
public String parent() {
return this.doc.parent();
}
public Document doc() {
return this.doc.doc();
}
@@ -363,6 +367,10 @@ public interface Engine extends IndexShardComponent, CloseableComponent {
return this.doc.routing();
}
public String parent() {
return this.doc.parent();
}
public byte[] source() {
return this.doc.source();
}

View File

@@ -68,6 +68,8 @@ public interface DocumentMapper {
RoutingFieldMapper routingFieldMapper();
ParentFieldMapper parentFieldMapper();
DocumentFieldMappers mappers();
/**

View File

@@ -0,0 +1,41 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.apache.lucene.index.Term;
import org.elasticsearch.common.util.concurrent.ThreadSafe;
/**
* @author kimchy (shay.banon)
*/
@ThreadSafe
public interface ParentFieldMapper extends FieldMapper<Uid>, InternalMapper {
public static final String NAME = "_parent";
/**
* The type of the parent doc.
*/
String type();
Term term(String type, String id);
Term term(String uid);
}
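Both term(...) variants should resolve to the same indexed term given the uid encoding (type, delimiter, id); a sketch with hypothetical values, assuming a mapper instance parentMapper:

Term a = parentMapper.term("parent", "p1");               // Term("_parent", "parent#p1"), '#' being the uid delimiter
Term b = parentMapper.term(Uid.createUid("parent", "p1"));
assert a.equals(b);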

View File

@@ -45,6 +45,8 @@ public class ParsedDocument {
private boolean mappersAdded;
private String parent;
public ParsedDocument(String uid, String id, String type, String routing, Document document, Analyzer analyzer, byte[] source, boolean mappersAdded) {
this.uid = uid;
this.id = id;
@@ -84,6 +86,15 @@ public class ParsedDocument {
return this.source;
}
public ParsedDocument parent(String parent) {
this.parent = parent;
return this;
}
public String parent() {
return this.parent;
}
/**
* Has the parsed document caused for new mappings to be added.
*/

View File

@@ -36,6 +36,8 @@ public class SourceToParse {
private String routing;
private String parentId;
public SourceToParse(byte[] source) {
this.source = source;
}
@@ -62,6 +64,15 @@ public class SourceToParse {
return this;
}
public String parent() {
return this.parentId;
}
public SourceToParse parent(String parentId) {
this.parentId = parentId;
return this;
}
public String routing() {
return this.routing;
}

View File

@@ -43,6 +43,28 @@ public final class Uid {
return id;
}
@Override public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Uid uid = (Uid) o;
if (id != null ? !id.equals(uid.id) : uid.id != null) return false;
if (type != null ? !type.equals(uid.type) : uid.type != null) return false;
return true;
}
@Override public int hashCode() {
int result = type != null ? type.hashCode() : 0;
result = 31 * result + (id != null ? id.hashCode() : 0);
return result;
}
@Override public String toString() {
return type + DELIMITER + id;
}
public static Uid createUid(String uid) {
int delimiterIndex = uid.lastIndexOf(DELIMITER);
return new Uid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
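So a uid splits on the last delimiter; for example:

Uid uid = Uid.createUid("child#c1"); // '#' being the uid delimiter
assert "child".equals(uid.type());
assert "c1".equals(uid.id());

Using lastIndexOf means the id portion must not contain the delimiter, while the type portion may.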

View File

@@ -0,0 +1,147 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper.xcontent;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.Term;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.Uid;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements org.elasticsearch.index.mapper.ParentFieldMapper {
public static final String CONTENT_TYPE = "_parent";
public static class Defaults extends AbstractFieldMapper.Defaults {
public static final String NAME = org.elasticsearch.index.mapper.ParentFieldMapper.NAME;
public static final Field.Index INDEX = Field.Index.NOT_ANALYZED;
public static final boolean OMIT_NORMS = true;
public static final boolean OMIT_TERM_FREQ_AND_POSITIONS = true;
}
public static class Builder extends XContentMapper.Builder<Builder, ParentFieldMapper> {
protected String indexName;
private String type;
public Builder() {
super(Defaults.NAME);
this.indexName = name;
}
public Builder type(String type) {
this.type = type;
return builder;
}
@Override public ParentFieldMapper build(BuilderContext context) {
if (type == null) {
throw new MapperParsingException("Parent mapping must contain the parent type");
}
return new ParentFieldMapper(name, indexName, type);
}
}
private final String type;
protected ParentFieldMapper(String name, String indexName, String type) {
super(new Names(name, indexName, indexName, name), Defaults.INDEX, Field.Store.YES, Defaults.TERM_VECTOR, Defaults.BOOST,
Defaults.OMIT_NORMS, Defaults.OMIT_TERM_FREQ_AND_POSITIONS, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER);
this.type = type;
}
@Override public String type() {
return type;
}
@Override protected Field parseCreateField(ParseContext context) throws IOException {
if (context.parser().currentName() != null && context.parser().currentName().equals(Defaults.NAME)) {
// we are in the parsing of _parent phase
String parentId = context.parser().text();
return new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), store, index);
}
// otherwise, we are running in post-processing of the xcontent
String parsedParentId = context.doc().get(Defaults.NAME);
if (context.externalValueSet()) {
String parentId = (String) context.externalValue();
if (parsedParentId == null) {
if (parentId == null) {
throw new MapperParsingException("No parent id provided, not within the document, and not externally");
}
// we did not add it in the parsing phase, add it now
return new Field(names.indexName(), Uid.createUid(context.stringBuilder(), type, parentId), store, index);
} else if (parentId != null && !parsedParentId.equals(Uid.createUid(context.stringBuilder(), type, parentId))) {
throw new MapperParsingException("Parent id mismatch, document value is [" + Uid.createUid(parsedParentId).id() + "], while external value is [" + parentId + "]");
}
}
// we have parent mapping, yet no value was set, ignore it...
return null;
}
@Override public Uid value(Fieldable field) {
return Uid.createUid(field.stringValue());
}
@Override public Uid valueFromString(String value) {
return Uid.createUid(value);
}
@Override public String valueAsString(Fieldable field) {
return field.stringValue();
}
@Override public String indexedValue(String value) {
if (value.indexOf(Uid.DELIMITER) == -1) {
return Uid.createUid(type, value);
}
return value;
}
@Override public Term term(String type, String id) {
return term(Uid.createUid(type, id));
}
@Override public Term term(String uid) {
return new Term(names.indexName(), uid);
}
@Override protected String contentType() {
return CONTENT_TYPE;
}
@Override public void toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(CONTENT_TYPE);
builder.field("type", type);
builder.endObject();
}
@Override public void merge(XContentMapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
// do nothing here, no merging, but also no exception
}
}
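Round-tripped through toXContent above, the mapping declaration this mapper serializes (and that parseParentField below consumes) looks like:

"_parent" : {
    "type" : "parent"
}

matching the mapping the benchmark in this commit puts for the child type.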

View File

@@ -85,6 +85,8 @@ public class ParseContext {
this.parser = parser;
this.document = document;
this.analyzer = null;
this.uid = null;
this.id = null;
this.type = type;
this.source = source;
this.path.reset();

View File

@@ -73,7 +73,7 @@ public class RoutingFieldMapper extends AbstractFieldMapper<String> implements o
}
}
- private final boolean required;
+ private boolean required;
private final String path;
@@ -88,6 +88,10 @@ public class RoutingFieldMapper extends AbstractFieldMapper<String> implements o
this.path = path;
}
public void markAsRequired() {
this.required = true;
}
@Override public boolean required() {
return this.required;
}

View File

@@ -62,6 +62,8 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
private AnalyzerMapper analyzerMapper = new AnalyzerMapper();
private ParentFieldMapper parentFieldMapper = null;
private NamedAnalyzer indexAnalyzer;
private NamedAnalyzer searchAnalyzer;
@@ -114,6 +116,11 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
return this;
}
public Builder parentField(ParentFieldMapper.Builder builder) {
this.parentFieldMapper = builder.build(builderContext);
return this;
}
public Builder boostField(BoostFieldMapper.Builder builder) {
this.boostFieldMapper = builder.build(builderContext);
return this;
@@ -150,7 +157,7 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
public XContentDocumentMapper build(XContentDocumentMapperParser docMapperParser) {
Preconditions.checkNotNull(rootObjectMapper, "Mapper builder must have the root object mapper set");
return new XContentDocumentMapper(index, docMapperParser, rootObjectMapper, meta, uidFieldMapper, idFieldMapper, typeFieldMapper, indexFieldMapper,
- sourceFieldMapper, routingFieldMapper, allFieldMapper, analyzerMapper, indexAnalyzer, searchAnalyzer, boostFieldMapper);
+ sourceFieldMapper, parentFieldMapper, routingFieldMapper, allFieldMapper, analyzerMapper, indexAnalyzer, searchAnalyzer, boostFieldMapper);
}
}
@@ -183,6 +190,8 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
private final RoutingFieldMapper routingFieldMapper;
private final ParentFieldMapper parentFieldMapper;
private final BoostFieldMapper boostFieldMapper;
private final AllFieldMapper allFieldMapper;
@@ -211,6 +220,7 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
TypeFieldMapper typeFieldMapper,
IndexFieldMapper indexFieldMapper,
SourceFieldMapper sourceFieldMapper,
@Nullable ParentFieldMapper parentFieldMapper,
RoutingFieldMapper routingFieldMapper,
AllFieldMapper allFieldMapper,
AnalyzerMapper analyzerMapper,
@@ -226,6 +236,7 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
this.typeFieldMapper = typeFieldMapper;
this.indexFieldMapper = indexFieldMapper;
this.sourceFieldMapper = sourceFieldMapper;
this.parentFieldMapper = parentFieldMapper;
this.routingFieldMapper = routingFieldMapper;
this.allFieldMapper = allFieldMapper;
this.analyzerMapper = analyzerMapper;
@@ -245,6 +256,11 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
if (boostFieldMapper != null) {
rootObjectMapper.putMapper(boostFieldMapper);
}
if (parentFieldMapper != null) {
rootObjectMapper.putMapper(parentFieldMapper);
// also, mark the routing as required!
routingFieldMapper.markAsRequired();
}
rootObjectMapper.putMapper(routingFieldMapper);
final List<FieldMapper> tempFieldMappers = newArrayList();
@@ -316,6 +332,10 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
return this.routingFieldMapper;
}
@Override public org.elasticsearch.index.mapper.ParentFieldMapper parentFieldMapper() {
return this.parentFieldMapper;
}
@Override public Analyzer indexAnalyzer() {
return this.indexAnalyzer;
}
@@ -406,6 +426,10 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
context.parsedId(ParseContext.ParsedIdState.EXTERNAL);
idFieldMapper.parse(context);
}
if (parentFieldMapper != null) {
context.externalValue(source.parent());
parentFieldMapper.parse(context);
}
analyzerMapper.parse(context);
allFieldMapper.parse(context);
// validate aggregated mappers (TODO: need to be added as a phase to any field mapper)
@@ -417,7 +441,8 @@ public class XContentDocumentMapper implements DocumentMapper, ToXContent {
parser.close();
}
}
- ParsedDocument doc = new ParsedDocument(context.uid(), context.id(), context.type(), source.routing(), context.doc(), context.analyzer(), source.source(), context.mappersAdded());
+ ParsedDocument doc = new ParsedDocument(context.uid(), context.id(), context.type(), source.routing(), context.doc(), context.analyzer(),
+ source.source(), context.mappersAdded()).parent(source.parent());
// reset the context to free up memory
context.reset(null, null, null, null, null);
return doc;

View File

@@ -106,6 +106,7 @@ public class XContentDocumentMapperParser extends AbstractIndexComponent impleme
return parse(type, source, null);
}
@SuppressWarnings({"unchecked"})
@Override public XContentDocumentMapper parse(@Nullable String type, String source, String defaultSource) throws MapperParsingException {
Map<String, Object> mapping = null;
if (source != null) {
@@ -148,6 +149,8 @@ public class XContentDocumentMapperParser extends AbstractIndexComponent impleme
docBuilder.uidField(parseUidField((Map<String, Object>) fieldNode, parserContext));
} else if (RoutingFieldMapper.CONTENT_TYPE.equals(fieldName)) {
docBuilder.routingField(parseRoutingField((Map<String, Object>) fieldNode, parserContext));
} else if (ParentFieldMapper.CONTENT_TYPE.equals(fieldName)) {
docBuilder.parentField(parseParentField((Map<String, Object>) fieldNode, parserContext));
} else if (BoostFieldMapper.CONTENT_TYPE.equals(fieldName) || "boostField".equals(fieldName)) {
docBuilder.boostField(parseBoostField((Map<String, Object>) fieldNode, parserContext));
} else if (AllFieldMapper.CONTENT_TYPE.equals(fieldName) || "allField".equals(fieldName)) {
@@ -230,6 +233,18 @@ public class XContentDocumentMapperParser extends AbstractIndexComponent impleme
return builder;
}
private ParentFieldMapper.Builder parseParentField(Map<String, Object> parentNode, XContentMapper.TypeParser.ParserContext parserContext) {
ParentFieldMapper.Builder builder = new ParentFieldMapper.Builder();
for (Map.Entry<String, Object> entry : parentNode.entrySet()) {
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
if (fieldName.equals("type")) {
builder.type(fieldNode.toString());
}
}
return builder;
}
private AnalyzerMapper.Builder parseAnalyzerField(Map<String, Object> analyzerNode, XContentMapper.TypeParser.ParserContext parserContext) {
AnalyzerMapper.Builder builder = analyzer();
for (Map.Entry<String, Object> entry : analyzerNode.entrySet()) {
@@ -292,6 +307,7 @@ public class XContentDocumentMapperParser extends AbstractIndexComponent impleme
return builder;
}
@SuppressWarnings({"unchecked"})
private Tuple<String, Map<String, Object>> extractMapping(String type, String source) throws MapperParsingException {
Map<String, Object> root;
XContentParser xContentParser = null;

View File

@@ -220,6 +220,8 @@ public class IndexQueryParserModule extends AbstractModule {
private static class DefaultQueryProcessors extends QueryParsersProcessor {
@Override public void processXContentQueryParsers(XContentQueryParsersBindings bindings) {
bindings.processXContentQueryParser(HasChildQueryParser.NAME, HasChildQueryParser.class);
bindings.processXContentQueryParser(TopChildrenQueryParser.NAME, TopChildrenQueryParser.class);
bindings.processXContentQueryParser(DisMaxQueryParser.NAME, DisMaxQueryParser.class);
bindings.processXContentQueryParser(MatchAllQueryParser.NAME, MatchAllQueryParser.class);
bindings.processXContentQueryParser(QueryStringQueryParser.NAME, QueryStringQueryParser.class);
@@ -246,6 +248,7 @@ public class IndexQueryParserModule extends AbstractModule {
}
@Override public void processXContentFilterParsers(XContentFilterParsersBindings bindings) {
bindings.processXContentQueryFilter(HasChildFilterParser.NAME, HasChildFilterParser.class);
bindings.processXContentQueryFilter(TermFilterParser.NAME, TermFilterParser.class);
bindings.processXContentQueryFilter(TermsFilterParser.NAME, TermsFilterParser.class);
bindings.processXContentQueryFilter(RangeFilterParser.NAME, RangeFilterParser.class);

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.search.internal.ScopePhase;
/**
* The result of parsing a query.
@@ -34,14 +35,18 @@ public class ParsedQuery {
private final ImmutableMap<String, Filter> namedFilters;
- public ParsedQuery(Query query, ImmutableMap<String, Filter> namedFilters) {
+ private final ScopePhase[] scopePhases;
+ public ParsedQuery(Query query, ImmutableMap<String, Filter> namedFilters, ScopePhase[] scopePhases) {
this.query = query;
this.namedFilters = namedFilters;
this.scopePhases = scopePhases;
}
public ParsedQuery(Query query, ParsedQuery parsedQuery) {
this.query = query;
this.namedFilters = parsedQuery.namedFilters;
this.scopePhases = parsedQuery.scopePhases;
}
/**
@@ -54,4 +59,8 @@ public class ParsedQuery {
public ImmutableMap<String, Filter> namedFilters() {
return this.namedFilters;
}
public ScopePhase[] scopePhases() {
return this.scopePhases;
}
}

View File

@@ -0,0 +1,95 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.type.child;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.OpenBitSet;
import org.elasticsearch.common.BytesWrap;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* @author kimchy (shay.banon)
*/
public class ChildCollector extends Collector {
private final String parentType;
private final SearchContext context;
private final Map<Object, IdReaderTypeCache> typeCacheMap;
private final Map<Object, OpenBitSet> parentDocs;
private IdReaderTypeCache typeCache;
public ChildCollector(String parentType, SearchContext context) {
this.parentType = parentType;
this.context = context;
this.parentDocs = new HashMap<Object, OpenBitSet>();
// create a specific type map lookup for faster lookup operations per doc
this.typeCacheMap = new HashMap<Object, IdReaderTypeCache>(context.searcher().subReaders().length);
for (IndexReader indexReader : context.searcher().subReaders()) {
typeCacheMap.put(indexReader.getFieldCacheKey(), context.idCache().reader(indexReader).type(parentType));
}
}
public Map<Object, OpenBitSet> parentDocs() {
return this.parentDocs;
}
@Override public void setScorer(Scorer scorer) throws IOException {
}
@Override public void collect(int doc) throws IOException {
if (typeCache == null) {
// no parent ids of this type indexed in the current reader, nothing to collect
return;
}
BytesWrap parentId = typeCache.parentIdByDoc(doc);
if (parentId == null) {
return;
}
for (IndexReader indexReader : context.searcher().subReaders()) {
int parentDocId = typeCacheMap.get(indexReader.getFieldCacheKey()).docById(parentId);
if (parentDocId != -1 && !indexReader.isDeleted(parentDocId)) {
OpenBitSet docIdSet = parentDocs().get(indexReader.getFieldCacheKey());
if (docIdSet == null) {
docIdSet = new OpenBitSet(indexReader.maxDoc());
parentDocs.put(indexReader.getFieldCacheKey(), docIdSet);
}
docIdSet.fastSet(parentDocId);
return;
}
}
}
@Override public void setNextReader(IndexReader reader, int docBase) throws IOException {
typeCache = typeCacheMap.get(reader.getFieldCacheKey());
}
@Override public boolean acceptsDocsOutOfOrder() {
return true;
}
}
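A minimal driver sketch (editor's note, not part of this commit): the collector is run against the child query, and its per-reader parent bitsets are what HasChildFilter later serves from getDocIdSet. The names childQuery and context are assumed to be in scope.

// run the (already type-filtered) child query with the collector, then read the bitsets
ChildCollector collector = new ChildCollector("parent", context);
context.searcher().search(childQuery, collector);
Map<Object, OpenBitSet> parentDocs = collector.parentDocs(); // keyed by reader field-cache key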

View File

@ -0,0 +1,96 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.type.child;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.OpenBitSet;
import org.elasticsearch.search.internal.ScopePhase;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.Map;
/**
* @author kimchy (shay.banon)
*/
public class HasChildFilter extends Filter implements ScopePhase.CollectorPhase {
private Query query;
private String scope;
private String parentType;
private String childType;
private final SearchContext searchContext;
private Map<Object, OpenBitSet> parentDocs;
public HasChildFilter(Query query, String scope, String childType, String parentType, SearchContext searchContext) {
this.query = query;
this.scope = scope;
this.parentType = parentType;
this.childType = childType;
this.searchContext = searchContext;
}
@Override public Query query() {
return query;
}
@Override public boolean requiresProcessing() {
return parentDocs == null;
}
@Override public Collector collector() {
return new ChildCollector(parentType, searchContext);
}
@Override public void processCollector(Collector collector) {
this.parentDocs = ((ChildCollector) collector).parentDocs();
}
@Override public String scope() {
return this.scope;
}
@Override public void clear() {
parentDocs = null;
}
@Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
DocIdSet docIdSet = parentDocs.get(reader.getFieldCacheKey());
if (docIdSet == null) {
return DocIdSet.EMPTY_DOCIDSET;
}
return docIdSet;
}
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("child_filter[").append(childType).append("/").append(parentType).append("](").append(query).append(')');
return sb.toString();
}
}

View File

@ -0,0 +1,300 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.type.child;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.common.BytesWrap;
import org.elasticsearch.common.lucene.search.EmptyScorer;
import org.elasticsearch.common.trove.TIntObjectHashMap;
import org.elasticsearch.search.internal.ScopePhase;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.*;
/**
* @author kimchy (shay.banon)
*/
public class TopChildrenQuery extends Query implements ScopePhase.TopDocsPhase {
public static enum ScoreType {
MAX,
AVG,
SUM;
public static ScoreType fromString(String type) {
if ("max".equals(type)) {
return MAX;
} else if ("avg".equals(type)) {
return AVG;
} else if ("sum".equals(type)) {
return SUM;
}
throw new ElasticSearchIllegalArgumentException("No score type for child query [" + type + "] found");
}
}
private Query query;
private String scope;
private String parentType;
private String childType;
private ScoreType scoreType;
private int factor;
private int incrementalFactor;
private Map<Object, ParentDoc[]> parentDocs;
private int numHits = 0;
// Note, the query is expected to already be filtered to only child type docs
public TopChildrenQuery(Query query, String scope, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor) {
this.query = query;
this.scope = scope;
this.childType = childType;
this.parentType = parentType;
this.scoreType = scoreType;
this.factor = factor;
this.incrementalFactor = incrementalFactor;
}
@Override public Query query() {
return this;
}
@Override public String scope() {
return scope;
}
@Override public void clear() {
parentDocs = null;
numHits = 0;
}
@Override public int numHits() {
return numHits;
}
@Override public int factor() {
return this.factor;
}
@Override public int incrementalFactor() {
return this.incrementalFactor;
}
@Override public void processResults(TopDocs topDocs, SearchContext context) {
Map<Object, TIntObjectHashMap<ParentDoc>> parentDocsPerReader = new HashMap<Object, TIntObjectHashMap<ParentDoc>>();
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
int readerIndex = context.searcher().readerIndex(scoreDoc.doc);
IndexReader subReader = context.searcher().subReaders()[readerIndex];
int subDoc = scoreDoc.doc - context.searcher().docStarts()[readerIndex];
// find the parent id
BytesWrap parentId = context.idCache().reader(subReader).parentIdByDoc(parentType, subDoc);
if (parentId == null) {
// no parent found
continue;
}
// now go over and find the parent doc Id and reader tuple
for (IndexReader indexReader : context.searcher().subReaders()) {
int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
if (parentDocId != -1 && !indexReader.isDeleted(parentDocId)) {
// we found a match, add it and break
TIntObjectHashMap<ParentDoc> readerParentDocs = parentDocsPerReader.get(indexReader.getFieldCacheKey());
if (readerParentDocs == null) {
readerParentDocs = new TIntObjectHashMap<ParentDoc>();
parentDocsPerReader.put(indexReader.getFieldCacheKey(), readerParentDocs);
}
ParentDoc parentDoc = readerParentDocs.get(parentDocId);
if (parentDoc == null) {
numHits++; // we have a hit on a parent
parentDoc = new ParentDoc();
parentDoc.docId = parentDocId;
parentDoc.count = 1;
parentDoc.maxScore = scoreDoc.score;
parentDoc.sumScores = scoreDoc.score;
readerParentDocs.put(parentDocId, parentDoc);
} else {
parentDoc.count++;
parentDoc.sumScores += scoreDoc.score;
if (scoreDoc.score > parentDoc.maxScore) {
parentDoc.maxScore = scoreDoc.score;
}
}
}
}
}
this.parentDocs = new HashMap<Object, ParentDoc[]>();
for (Map.Entry<Object, TIntObjectHashMap<ParentDoc>> entry : parentDocsPerReader.entrySet()) {
ParentDoc[] values = entry.getValue().getValues(new ParentDoc[entry.getValue().size()]);
Arrays.sort(values, PARENT_DOC_COMP);
parentDocs.put(entry.getKey(), values);
}
}
private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator();
static class ParentDocComparator implements Comparator<ParentDoc> {
@Override public int compare(ParentDoc o1, ParentDoc o2) {
return o1.docId - o2.docId;
}
}
public static class ParentDoc {
public int docId;
public int count;
public float maxScore = Float.NaN;
public float sumScores = 0;
}
@Override public Query rewrite(IndexReader reader) throws IOException {
Query newQ = query.rewrite(reader);
if (newQ == query) return this;
TopChildrenQuery bq = (TopChildrenQuery) this.clone();
bq.query = newQ;
return bq;
}
@Override public void extractTerms(Set<Term> terms) {
query.extractTerms(terms);
}
@Override public Weight createWeight(Searcher searcher) throws IOException {
if (parentDocs != null) {
return new ParentWeight(searcher, query.weight(searcher));
}
return query.weight(searcher);
}
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(query.toString(field)).append(')');
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString();
}
class ParentWeight extends Weight {
final Searcher searcher;
final Weight queryWeight;
public ParentWeight(Searcher searcher, Weight queryWeight) throws IOException {
this.searcher = searcher;
this.queryWeight = queryWeight;
}
public Query getQuery() {
return TopChildrenQuery.this;
}
public float getValue() {
return getBoost();
}
@Override
public float sumOfSquaredWeights() throws IOException {
float sum = queryWeight.sumOfSquaredWeights();
sum *= getBoost() * getBoost();
return sum;
}
@Override
public void normalize(float norm) {
// nothing to do here....
}
@Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
ParentDoc[] readerParentDocs = parentDocs.get(reader.getFieldCacheKey());
if (readerParentDocs != null) {
return new ParentScorer(getSimilarity(searcher), readerParentDocs);
}
return new EmptyScorer(getSimilarity(searcher));
}
@Override
public Explanation explain(IndexReader reader, int doc) throws IOException {
return new Explanation(getBoost(), "not implemented yet...");
}
}
class ParentScorer extends Scorer {
private final ParentDoc[] docs;
private int index = -1;
private ParentScorer(Similarity similarity, ParentDoc[] docs) throws IOException {
super(similarity);
this.docs = docs;
}
@Override
public int docID() {
if (index == -1) {
return -1; // not positioned yet, per the Scorer contract
}
if (index >= docs.length) {
return NO_MORE_DOCS;
}
return docs[index].docId;
}
@Override
public int advance(int target) throws IOException {
int doc;
while ((doc = nextDoc()) < target) {
}
return doc;
}
@Override
public int nextDoc() throws IOException {
if (++index >= docs.length) {
return NO_MORE_DOCS;
}
return docs[index].docId;
}
@Override
public float score() throws IOException {
if (scoreType == ScoreType.MAX) {
return docs[index].maxScore;
} else if (scoreType == ScoreType.AVG) {
return docs[index].sumScores / docs[index].count;
} else if (scoreType == ScoreType.SUM) {
return docs[index].sumScores;
}
throw new ElasticSearchIllegalStateException("No support for score type [" + scoreType + "]");
}
}
}
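A quick worked example of the three score modes (editor's sketch): a parent matched by three children scored 0.2, 0.3 and 0.7 accumulates count = 3, maxScore = 0.7 and sumScores = 1.2, so ParentScorer.score() returns:

// MAX -> 0.7 (maxScore)
// SUM -> 1.2 (sumScores)
// AVG -> 1.2 / 3 = 0.4 (sumScores / count)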

View File

@ -233,7 +233,7 @@ public abstract class FilterBuilders {
*
* @param name The name of the field
*/
public static ExistsFilterBuilder exists(String name) {
public static ExistsFilterBuilder existsFilter(String name) {
return new ExistsFilterBuilder(name);
}
@ -242,10 +242,21 @@ public abstract class FilterBuilders {
*
* @param name The name of the field
*/
public static MissingFilterBuilder missing(String name) {
public static MissingFilterBuilder missingFilter(String name) {
return new MissingFilterBuilder(name);
}
/**
* Constructs a child filter, with the child type and the query to run against child documents; the
* result of the filter is the matching *parent* documents.
*
* @param type The child type
* @param query The query to run against the child type
*/
public static HasChildFilterBuilder hasChildFilter(String type, XContentQueryBuilder query) {
return new HasChildFilterBuilder(type, query);
}
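// Usage sketch (editor's note, values illustrative; termQuery comes from QueryBuilders):
//   hasChildFilter("child", termQuery("c_field", "red")).filterName("red_children");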
public static BoolFilterBuilder boolFilter() {
return new BoolFilterBuilder();
}

View File

@ -0,0 +1,71 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class HasChildFilterBuilder extends BaseFilterBuilder {
private final XContentQueryBuilder queryBuilder;
private String childType;
private String scope;
private String filterName;
public HasChildFilterBuilder(String type, XContentQueryBuilder queryBuilder) {
this.childType = type;
this.queryBuilder = queryBuilder;
}
public HasChildFilterBuilder scope(String scope) {
this.scope = scope;
return this;
}
/**
* Sets the filter name for the filter that can be used when searching for matched_filters per hit.
*/
public HasChildFilterBuilder filterName(String filterName) {
this.filterName = filterName;
return this;
}
@Override protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(HasChildFilterParser.NAME);
builder.field("query");
queryBuilder.toXContent(builder, params);
builder.field("type", childType);
if (scope != null) {
builder.field("scope", scope);
}
if (filterName != null) {
builder.field("_name", filterName);
}
builder.endObject();
}
}
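For reference, a hedged sketch of the element doXContent above produces (optional fields omitted when unset):

{ "has_child" : { "query" : { ... }, "type" : "child", "scope" : "...", "_name" : "..." } }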

View File

@ -0,0 +1,108 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.type.child.HasChildFilter;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class HasChildFilterParser extends AbstractIndexComponent implements XContentFilterParser {
public static final String NAME = "has_child";
@Inject public HasChildFilterParser(Index index, @IndexSettings Settings settings) {
super(index, settings);
}
@Override public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
String childType = null;
String scope = null;
String filterName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
query = parseContext.parseInnerQuery();
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName)) {
childType = parser.text();
} else if ("scope".equals(currentFieldName)) {
scope = parser.text();
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
}
}
}
if (query == null) {
throw new QueryParsingException(index, "[has_child] filter requires 'query' field");
}
if (childType == null) {
throw new QueryParsingException(index, "[has_child] filter requires 'type' field");
}
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(index, "No mapping for type [" + childType + "]");
}
if (childDocMapper.parentFieldMapper() == null) {
throw new QueryParsingException(index, "Type [" + childType + "] does not have a parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
// wrap the query with the child type filter
query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter()));
HasChildFilter childFilter = new HasChildFilter(query, scope, childType, parentType, SearchContext.current());
parseContext.addScopePhase(childFilter);
if (filterName != null) {
parseContext.addNamedFilter(filterName, childFilter);
}
return childFilter;
}
}

View File

@ -0,0 +1,75 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class HasChildQueryBuilder extends BaseQueryBuilder {
private final XContentQueryBuilder queryBuilder;
private String childType;
private String scope;
private float boost = 1.0f;
public HasChildQueryBuilder(String type, XContentQueryBuilder queryBuilder) {
this.childType = type;
this.queryBuilder = queryBuilder;
}
/**
* The scope of the query, which can later be used, for example, to run facets against the child docs that
* match the query.
*/
public HasChildQueryBuilder scope(String scope) {
this.scope = scope;
return this;
}
/**
* Sets the boost for this query. Documents matching this query will (in addition to the normal
* weightings) have their score multiplied by the boost provided.
*/
public HasChildQueryBuilder boost(float boost) {
this.boost = boost;
return this;
}
@Override protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(HasChildQueryParser.NAME);
builder.field("query");
queryBuilder.toXContent(builder, params);
builder.field("type", childType);
if (scope != null) {
builder.field("scope", scope);
}
if (boost != 1.0f) {
builder.field("boost", boost);
}
builder.endObject();
}
}
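The query element serializes along the same lines (editor's sketch from doXContent above; "scope" is omitted when unset and "boost" is only written when it differs from 1.0):

{ "has_child" : { "query" : { ... }, "type" : "child", "boost" : 2.0 } }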

View File

@ -0,0 +1,106 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.type.child.HasChildFilter;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class HasChildQueryParser extends AbstractIndexComponent implements XContentQueryParser {
public static final String NAME = "has_child";
@Inject public HasChildQueryParser(Index index, @IndexSettings Settings settings) {
super(index, settings);
}
@Override public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
float boost = 1.0f;
String childType = null;
String scope = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
query = parseContext.parseInnerQuery();
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName)) {
childType = parser.text();
} else if ("scope".equals(currentFieldName)) {
scope = parser.text();
} else if ("boost".equals(currentFieldName)) {
// the builder emits "boost", so parse it here instead of silently dropping it
boost = parser.floatValue();
}
}
}
if (query == null) {
throw new QueryParsingException(index, "[has_child] requires 'query' field");
}
if (childType == null) {
throw new QueryParsingException(index, "[has_child] requires 'type' field");
}
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(index, "[has_child] No mapping for type [" + childType + "]");
}
if (childDocMapper.parentFieldMapper() == null) {
throw new QueryParsingException(index, "[has_child] Type [" + childType + "] does not have a parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
query.setBoost(boost);
// wrap the query with the child type filter
query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter()));
HasChildFilter childFilter = new HasChildFilter(query, scope, childType, parentType, SearchContext.current());
// we don't need DeletionAwareConstantScore, since we filter deleted parent docs in the filter
ConstantScoreQuery childQuery = new ConstantScoreQuery(childFilter);
childQuery.setBoost(boost);
parseContext.addScopePhase(childFilter);
return childQuery;
}
}

View File

@ -400,6 +400,28 @@ public abstract class QueryBuilders {
return new MoreLikeThisFieldQueryBuilder(name);
}
/**
* Constructs a new scoring child query, with the child type and the query to run on the child documents. The
* result of this query is the set of parent docs whose children matched the query.
*
* @param type The child type.
* @param query The query.
*/
public static TopChildrenQueryBuilder topChildrenQuery(String type, XContentQueryBuilder query) {
return new TopChildrenQueryBuilder(type, query);
}
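// Usage sketch (editor's note, values illustrative):
//   topChildrenQuery("child", termQuery("c_field", "yellow")).score("sum").factor(5).incrementalFactor(2);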
/**
* Constructs a new non-scoring child query, with the child type and the query to run on the child documents. The
* result of this query is the set of parent docs whose children matched the query.
*
* @param type The child type.
* @param query The query.
*/
public static HasChildQueryBuilder hasChildQuery(String type, XContentQueryBuilder query) {
return new HasChildQueryBuilder(type, query);
}
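// Usage sketch (editor's note): the constant-scoring counterpart of topChildrenQuery:
//   hasChildQuery("child", termQuery("c_field", "yellow")).scope("child_scope");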
private QueryBuilders() {
}

View File

@ -27,6 +27,7 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Similarity;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.common.collect.Lists;
import org.elasticsearch.common.collect.Maps;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
@ -38,9 +39,11 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.ScopePhase;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
@ -54,6 +57,8 @@ public class QueryParseContext {
private final Map<String, Filter> namedFilters = Maps.newHashMap();
private final List<ScopePhase> scopePhases = Lists.newArrayList();
private final MapperQueryParser queryParser = new MapperQueryParser(this);
private final MultiFieldMapperQueryParser multiFieldQueryParser = new MultiFieldMapperQueryParser(this);
@ -68,6 +73,7 @@ public class QueryParseContext {
public void reset(XContentParser jp) {
this.parser = jp;
this.namedFilters.clear();
this.scopePhases.clear();
}
public XContentParser parser() {
@ -127,6 +133,19 @@ public class QueryParseContext {
return ImmutableMap.copyOf(namedFilters);
}
public void addScopePhase(ScopePhase scopePhase) {
scopePhases.add(scopePhase);
}
private static final ScopePhase[] EMPTY_SCOPE_PHASES = new ScopePhase[0];
public ScopePhase[] copyScopePhases() {
if (scopePhases.isEmpty()) {
return EMPTY_SCOPE_PHASES;
}
return scopePhases.toArray(new ScopePhase[scopePhases.size()]);
}
public Query parseInnerQuery() throws IOException, QueryParsingException {
// move to START object
XContentParser.Token token;

View File

@ -0,0 +1,116 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class TopChildrenQueryBuilder extends BaseQueryBuilder {
private final XContentQueryBuilder queryBuilder;
private String childType;
private String scope;
private String score;
private float boost = 1.0f;
private int factor = -1;
private int incrementalFactor = -1;
public TopChildrenQueryBuilder(String type, XContentQueryBuilder queryBuilder) {
this.childType = type;
this.queryBuilder = queryBuilder;
}
/**
* The scope of the query, which can later be used, for example, to run facets against the child docs that
* match the query.
*/
public TopChildrenQueryBuilder scope(String scope) {
this.scope = scope;
return this;
}
/**
* How to compute the score. Possible values are: <tt>max</tt>, <tt>sum</tt>, or <tt>avg</tt>. Defaults
* to <tt>max</tt>.
*/
public TopChildrenQueryBuilder score(String score) {
this.score = score;
return this;
}
/**
* Controls the multiplication factor of the initial hits required from the child query over the main query request.
* Defaults to 5.
*/
public TopChildrenQueryBuilder factor(int factor) {
this.factor = factor;
return this;
}
/**
* Sets the incremental factor when the query needs to be re-run in order to fetch more results. Defaults to 2.
*/
public TopChildrenQueryBuilder incrementalFactor(int incrementalFactor) {
this.incrementalFactor = incrementalFactor;
return this;
}
/**
* Sets the boost for this query. Documents matching this query will (in addition to the normal
* weightings) have their score multiplied by the boost provided.
*/
public TopChildrenQueryBuilder boost(float boost) {
this.boost = boost;
return this;
}
@Override protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(TopChildrenQueryParser.NAME);
builder.field("query");
queryBuilder.toXContent(builder, params);
builder.field("type", childType);
if (scope != null) {
builder.field("scope", scope);
}
if (score != null) {
builder.field("score", score);
}
if (boost != 1.0f) {
builder.field("boost", boost);
}
if (factor != -1) {
builder.field("factor", factor);
}
if (incrementalFactor != -1) {
builder.field("incremental_factor", incrementalFactor);
}
builder.endObject();
}
}
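For reference, a fully specified element serializes roughly as (editor's sketch from doXContent above):

{ "top_children" : { "query" : { ... }, "type" : "child", "score" : "max", "boost" : 2.0, "factor" : 5, "incremental_factor" : 2 } }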

View File

@ -0,0 +1,112 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query.xcontent;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.query.QueryParsingException;
import org.elasticsearch.index.query.type.child.TopChildrenQuery;
import org.elasticsearch.index.settings.IndexSettings;
import java.io.IOException;
/**
* @author kimchy (shay.banon)
*/
public class TopChildrenQueryParser extends AbstractIndexComponent implements XContentQueryParser {
public static final String NAME = "top_children";
@Inject public TopChildrenQueryParser(Index index, @IndexSettings Settings settings) {
super(index, settings);
}
@Override public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
float boost = 1.0f;
String childType = null;
String scope = null;
TopChildrenQuery.ScoreType scoreType = TopChildrenQuery.ScoreType.MAX;
int factor = 5;
int incrementalFactor = 2;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
query = parseContext.parseInnerQuery();
}
} else if (token.isValue()) {
if ("type".equals(currentFieldName)) {
childType = parser.text();
} else if ("scope".equals(currentFieldName)) {
scope = parser.text();
} else if ("score".equals(currentFieldName)) {
scoreType = TopChildrenQuery.ScoreType.fromString(parser.text());
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("factor".equals(currentFieldName)) {
factor = parser.intValue();
} else if ("incremental_factor".equals(currentFieldName) || "incrementalFactor".equals(currentFieldName)) {
incrementalFactor = parser.intValue();
}
}
}
if (query == null) {
throw new QueryParsingException(index, "[top_children] requires 'query' field");
}
if (childType == null) {
throw new QueryParsingException(index, "[top_children] requires 'type' field");
}
DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
if (childDocMapper == null) {
throw new QueryParsingException(index, "No mapping for type [" + childType + "]");
}
if (childDocMapper.parentFieldMapper() == null) {
throw new QueryParsingException(index, "Type [" + childType + "] does not have a parent mapping");
}
String parentType = childDocMapper.parentFieldMapper().type();
query.setBoost(boost);
// wrap the query with the child type filter
query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter()));
TopChildrenQuery childQuery = new TopChildrenQuery(query, scope, childType, parentType, scoreType, factor, incrementalFactor);
parseContext.addScopePhase(childQuery);
return childQuery;
}
}

View File

@ -232,7 +232,7 @@ public class XContentIndexQueryParser extends AbstractIndexComponent implements
private ParsedQuery parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException {
parseContext.reset(parser);
Query query = parseContext.parseInnerQuery();
return new ParsedQuery(query, parseContext.copyNamedFilters());
return new ParsedQuery(query, parseContext.copyNamedFilters(), parseContext.copyScopePhases());
}
private void add(Map<String, XContentFilterParser> map, XContentFilterParser filterParser) {

View File

@ -461,11 +461,13 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
switch (operation.opType()) {
case CREATE:
Translog.Create create = (Translog.Create) operation;
engine.create(prepareCreate(source(create.source()).type(create.type()).id(create.id()).routing(create.routing())));
engine.create(prepareCreate(source(create.source()).type(create.type()).id(create.id())
.routing(create.routing()).parent(create.parent())));
break;
case SAVE:
Translog.Index index = (Translog.Index) operation;
engine.index(prepareIndex(source(index.source()).type(index.type()).id(index.id()).routing(index.routing())));
engine.index(prepareIndex(source(index.source()).type(index.type()).id(index.id())
.routing(index.routing()).parent(index.parent())));
break;
case DELETE:
Translog.Delete delete = (Translog.Delete) operation;

View File

@ -194,6 +194,7 @@ public interface Translog extends IndexShardComponent {
private String type;
private byte[] source;
private String routing;
private String parent;
public Create() {
}
@ -201,6 +202,7 @@ public interface Translog extends IndexShardComponent {
public Create(Engine.Create create) {
this(create.type(), create.id(), create.source());
this.routing = create.routing();
this.parent = create.parent();
}
public Create(String type, String id, byte[] source) {
@ -233,21 +235,30 @@ public interface Translog extends IndexShardComponent {
return this.routing;
}
public String parent() {
return this.parent;
}
@Override public void readFrom(StreamInput in) throws IOException {
int version = in.readVInt(); // version
id = in.readUTF();
type = in.readUTF();
source = new byte[in.readVInt()];
in.readFully(source);
if (version == 1) {
if (version >= 1) {
if (in.readBoolean()) {
routing = in.readUTF();
}
}
if (version >= 2) {
if (in.readBoolean()) {
parent = in.readUTF();
}
}
}
@Override public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(1); // version
out.writeVInt(2); // version
out.writeUTF(id);
out.writeUTF(type);
out.writeVInt(source.length);
@ -258,6 +269,12 @@ public interface Translog extends IndexShardComponent {
out.writeBoolean(true);
out.writeUTF(routing);
}
if (parent == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeUTF(parent);
}
}
}
@ -266,6 +283,7 @@ public interface Translog extends IndexShardComponent {
private String type;
private byte[] source;
private String routing;
private String parent;
public Index() {
}
@ -273,6 +291,7 @@ public interface Translog extends IndexShardComponent {
public Index(Engine.Index index) {
this(index.type(), index.id(), index.source());
this.routing = index.routing();
this.parent = index.parent();
}
public Index(String type, String id, byte[] source) {
@ -301,6 +320,10 @@ public interface Translog extends IndexShardComponent {
return this.routing;
}
public String parent() {
return this.parent;
}
public byte[] source() {
return this.source;
}
@ -311,15 +334,20 @@ public interface Translog extends IndexShardComponent {
type = in.readUTF();
source = new byte[in.readVInt()];
in.readFully(source);
if (version == 1) {
if (version >= 1) {
if (in.readBoolean()) {
routing = in.readUTF();
}
}
if (version >= 2) {
if (in.readBoolean()) {
parent = in.readUTF();
}
}
}
@Override public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(1); // version
out.writeVInt(2); // version
out.writeUTF(id);
out.writeUTF(type);
out.writeVInt(source.length);
@ -330,6 +358,12 @@ public interface Translog extends IndexShardComponent {
out.writeBoolean(true);
out.writeUTF(routing);
}
if (parent == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeUTF(parent);
}
}
}
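A short compatibility sketch (editor's summary of the version gating above):

// v0: id, type, source                         -> routing == null, parent == null
// v1: v0 + optional routing (boolean-prefixed) -> parent == null
// v2: v1 + optional parent (boolean-prefixed)
// writers now emit v2; readers accept all three, so existing translogs replay cleanly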

View File

@ -61,6 +61,7 @@ public class RestIndexAction extends BaseRestHandler {
@Override public void handleRequest(final RestRequest request, final RestChannel channel) {
IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"));
indexRequest.routing(request.param("routing"));
indexRequest.parent(request.param("parent")); // order is important: set parent after routing, so the parent id only becomes the routing value when no routing was provided
indexRequest.source(request.contentByteArray(), request.contentByteArrayOffset(), request.contentLength(), request.contentUnsafe());
indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
indexRequest.refresh(request.paramAsBoolean("refresh", indexRequest.refresh()));
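// Java client equivalent of indexing with a parent (editor's sketch, mirroring the tests below):
//   client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();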

View File

@ -422,7 +422,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
private class SearchQueryQueryFetchTransportHandler extends BaseTransportRequestHandler<QuerySearchRequest> {
static final String ACTION = "search/phase/queyr/query+fetch";
static final String ACTION = "search/phase/query/query+fetch";
@Override public QuerySearchRequest newInstance() {
return new QuerySearchRequest();

View File

@ -48,7 +48,8 @@ public class CachedDfSource extends Searcher {
public int docFreq(Term term) {
int df = dfs.dfMap().get(term);
if (df == -1) {
throw new IllegalArgumentException("df for term " + term + " not available");
return 1;
// throw new IllegalArgumentException("df for term " + term + " not available");
}
return df;
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.search.facet;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.xcontent.XContentFilterBuilder;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import java.io.IOException;
@ -32,7 +33,7 @@ public abstract class AbstractFacetBuilder implements ToXContent {
protected final String name;
protected Boolean global;
protected String scope;
protected XContentFilterBuilder facetFilter;
@ -45,8 +46,19 @@ public abstract class AbstractFacetBuilder implements ToXContent {
return this;
}
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
public AbstractFacetBuilder global(boolean global) {
this.global = global;
this.scope = ContextIndexSearcher.Scopes.GLOBAL;
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
public AbstractFacetBuilder scope(String scope) {
this.scope = scope;
return this;
}
@ -56,8 +68,8 @@ public abstract class AbstractFacetBuilder implements ToXContent {
facetFilter.toXContent(builder, params);
}
if (global != null) {
builder.field("global", global);
if (scope != null) {
builder.field("scope", scope);
}
}
}

View File

@ -37,8 +37,19 @@ public class FilterFacetBuilder extends AbstractFacetBuilder {
super(name);
}
public FilterFacetBuilder global(boolean global) {
this.global = global;
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
@Override public FilterFacetBuilder global(boolean global) {
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public FilterFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}
@ -60,14 +71,8 @@ public class FilterFacetBuilder extends AbstractFacetBuilder {
builder.field(FilterFacetCollectorParser.NAME);
filter.toXContent(builder, params);
if (facetFilter != null) {
builder.field("filter");
facetFilter.toXContent(builder, params);
}
addFilterFacetAndGlobal(builder, params);
if (global != null) {
builder.field("global", global);
}
builder.endObject();
}
}

View File

@ -196,8 +196,19 @@ public class GeoDistanceFacetBuilder extends AbstractFacetBuilder {
return this;
}
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
public GeoDistanceFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public GeoDistanceFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -101,7 +101,15 @@ public class HistogramFacetBuilder extends AbstractFacetBuilder {
* the search query). Defaults to <tt>false</tt>.
*/
public HistogramFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public HistogramFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -85,8 +85,19 @@ public class HistogramScriptFacetBuilder extends AbstractFacetBuilder {
return this;
}
public HistogramScriptFacetBuilder global(boolean global) {
this.global = global;
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
@Override public HistogramScriptFacetBuilder global(boolean global) {
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public HistogramScriptFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -38,8 +38,19 @@ public class QueryFacetBuilder extends AbstractFacetBuilder {
super(name);
}
public QueryFacetBuilder global(boolean global) {
this.global = global;
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
@Override public QueryFacetBuilder global(boolean global) {
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public QueryFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -129,7 +129,15 @@ public class RangeFacetBuilder extends AbstractFacetBuilder {
* the search query). Defaults to <tt>false</tt>.
*/
public RangeFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public RangeFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -103,8 +103,20 @@ public class RangeScriptFacetBuilder extends AbstractFacetBuilder {
}
/**
* Should the facet run in global mode (not bounded by the search query) or not (bounded by
* the search query). Defaults to <tt>false</tt>.
*/
public RangeScriptFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public RangeScriptFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -50,8 +50,19 @@ public class StatisticalFacetBuilder extends AbstractFacetBuilder {
return this;
}
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
public StatisticalFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public StatisticalFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -40,8 +40,19 @@ public class StatisticalScriptFacetBuilder extends AbstractFacetBuilder {
super(name);
}
/**
* Marks the facet to run in a global scope, not bounded by any query.
*/
public StatisticalScriptFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public AbstractFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -60,7 +60,15 @@ public class TermsFacetBuilder extends AbstractFacetBuilder {
* to <tt>false</tt>.
*/
public TermsFacetBuilder global(boolean global) {
this.global = global;
super.global(global);
return this;
}
/**
* Marks the facet to run in a specific scope.
*/
@Override public TermsFacetBuilder scope(String scope) {
super.scope(scope);
return this;
}

View File

@ -0,0 +1,56 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.internal;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
/**
* @author kimchy (shay.banon)
*/
public interface ScopePhase {
String scope();
void clear();
Query query();
public interface TopDocsPhase extends ScopePhase {
void processResults(TopDocs topDocs, SearchContext context);
int numHits();
int factor();
int incrementalFactor();
}
public interface CollectorPhase extends ScopePhase {
boolean requiresProcessing();
Collector collector();
void processCollector(Collector collector);
}
}
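In short (editor's summary of the QueryPhase driver further below), the two protocols execute as:

// CollectorPhase: run once per request, then reuse the collected state
//   searcher.search(phase.query(), phase.collector()); phase.processCollector(collector);
// TopDocsPhase: widen the search until enough parents are found
//   numDocs = (from + size) * factor();
//   loop { topDocs = searcher.search(phase.query(), numDocs); phase.processResults(topDocs, context);
//          break when phase.numHits() >= from + size or topDocs.totalHits <= numDocs;
//          numDocs *= incrementalFactor(); }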

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.cache.field.data.FieldDataCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParser;
@ -151,6 +152,12 @@ public class SearchContext implements Releasable {
}
@Override public boolean release() throws ElasticSearchException {
// clear any scope phases we have
if (parsedQuery() != null) {
for (ScopePhase scopePhase : parsedQuery().scopePhases()) {
scopePhase.clear();
}
}
// we should close this searcher, since it's a new one we create each time, and we use the IndexReader
try {
searcher.close();
@ -262,6 +269,10 @@ public class SearchContext implements Releasable {
return indexService.cache().fieldData();
}
public IdCache idCache() {
return indexService.cache().idCache();
}
public TimeValue timeout() {
return timeout;
}

View File

@ -19,10 +19,7 @@
package org.elasticsearch.search.query;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.*;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
@ -33,6 +30,7 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.facet.FacetsPhase;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.ScopePhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortParseElement;
@ -75,6 +73,79 @@ public class QueryPhase implements SearchPhase {
}
public void execute(SearchContext searchContext) throws QueryPhaseExecutionException {
if (searchContext.parsedQuery().scopePhases().length > 0) {
// we have scoped queries, refresh the id cache
try {
searchContext.idCache().refresh(searchContext.searcher().subReaders());
} catch (Exception e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to refresh id cache for child queries", e);
}
// process scoped queries (from the last to the first, relying on the order they were added during parsing)
for (int i = searchContext.parsedQuery().scopePhases().length - 1; i >= 0; i--) {
ScopePhase scopePhase = searchContext.parsedQuery().scopePhases()[i];
if (scopePhase instanceof ScopePhase.TopDocsPhase) {
ScopePhase.TopDocsPhase topDocsPhase = (ScopePhase.TopDocsPhase) scopePhase;
topDocsPhase.clear();
int numDocs = (searchContext.from() + searchContext.size());
if (numDocs == 0) {
numDocs = 1;
}
try {
numDocs *= topDocsPhase.factor();
while (true) {
if (topDocsPhase.scope() != null) {
searchContext.searcher().processingScope(topDocsPhase.scope());
}
TopDocs topDocs = searchContext.searcher().search(topDocsPhase.query(), numDocs);
if (topDocsPhase.scope() != null) {
// we mark the scope as processed, so we don't process it again, even if we need to rerun the query...
searchContext.searcher().processedScope();
}
topDocsPhase.processResults(topDocs, searchContext);
// check if we found enough docs, if so, break
if (topDocsPhase.numHits() >= (searchContext.from() + searchContext.size())) {
break;
}
// if we did not find enough docs, check if it makes sense to search further
if (topDocs.totalHits <= numDocs) {
break;
}
// if not, update numDocs, and search again
numDocs *= topDocsPhase.incrementalFactor();
if (numDocs > topDocs.totalHits) {
numDocs = topDocs.totalHits;
}
}
} catch (Exception e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute child query [" + scopePhase.query() + "]", e);
}
} else if (scopePhase instanceof ScopePhase.CollectorPhase) {
try {
ScopePhase.CollectorPhase collectorPhase = (ScopePhase.CollectorPhase) scopePhase;
// collector phase might not require extra processing, for example, when scrolling
if (!collectorPhase.requiresProcessing()) {
continue;
}
if (scopePhase.scope() != null) {
searchContext.searcher().processingScope(scopePhase.scope());
}
Collector collector = collectorPhase.collector();
searchContext.searcher().search(collectorPhase.query(), collector);
collectorPhase.processCollector(collector);
if (collectorPhase.scope() != null) {
// we mark the scope as processed, so we don't process it again, even if we need to rerun the query...
searchContext.searcher().processedScope();
}
} catch (Exception e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute child query [" + scopePhase.query() + "]", e);
}
}
}
}
searchContext.searcher().processingScope(ContextIndexSearcher.Scopes.MAIN);
try {
searchContext.queryResult().from(searchContext.from());

View File

@ -0,0 +1,84 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper.xcontent.parent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.xcontent.MapperTests;
import org.elasticsearch.index.mapper.xcontent.XContentDocumentMapper;
import org.testng.annotations.Test;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;
/**
* @author kimchy (shay.banon)
*/
public class ParentMappingTests {
@Test public void parentSetInDocNotExternally() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "p_type").endObject()
.endObject().endObject().string();
XContentDocumentMapper docMapper = MapperTests.newParser().parse(mapping);
ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
.startObject()
.field("_parent", "1122")
.field("x_field", "x_value")
.endObject()
.copiedBytes()).type("type").id("1"));
assertThat(doc.doc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
}
@Test public void parentNotSetInDocSetExternally() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "p_type").endObject()
.endObject().endObject().string();
XContentDocumentMapper docMapper = MapperTests.newParser().parse(mapping);
ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
.startObject()
.field("x_field", "x_value")
.endObject()
.copiedBytes()).type("type").id("1").parent("1122"));
assertThat(doc.doc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
}
@Test public void parentSetInDocSetExternally() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "p_type").endObject()
.endObject().endObject().string();
XContentDocumentMapper docMapper = MapperTests.newParser().parse(mapping);
ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
.startObject()
.field("_parent", "1122")
.field("x_field", "x_value")
.endObject()
.copiedBytes()).type("type").id("1").parent("1122"));
assertThat(doc.doc().get("_parent"), equalTo(Uid.createUid("p_type", "1122")));
}
}

View File

@ -0,0 +1,589 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.integration.search.child;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.search.facet.terms.TermsFacet;
import org.elasticsearch.test.integration.AbstractNodesTests;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import static org.elasticsearch.index.query.xcontent.FilterBuilders.*;
import static org.elasticsearch.index.query.xcontent.QueryBuilders.*;
import static org.elasticsearch.search.facet.FacetBuilders.*;
import static org.hamcrest.MatcherAssert.*;
import static org.hamcrest.Matchers.*;

/**
* @author kimchy (shay.banon)
*/
public class SimpleChildQuerySearchTests extends AbstractNodesTests {
private Client client;
@BeforeClass public void createNodes() throws Exception {
startNode("node1");
startNode("node2");
client = getClient();
}
@AfterClass public void closeNodes() {
client.close();
closeAllNodes();
}
protected Client getClient() {
return client("node1");
}
@Test public void simpleChildQuery() throws Exception {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// ignore
}
client.admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
// index simple data
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").execute().actionGet();
client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();
client.prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").execute().actionGet();
client.prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").execute().actionGet();
client.prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").execute().actionGet();
client.prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
// TOP CHILDREN QUERY
SearchResponse searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD QUERY
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD FILTER
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
}
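
// Editor's note: this test runs the same data through three formulations --
// top_children as a query, has_child as a query, and has_child as a filter
// wrapped in constantScoreQuery -- and expects identical parent hits from all
// three. Every assertion block also repeats the same failed-shard logging; a
// helper along the following lines would collapse that repetition. It is an
// editor's sketch with an illustrative name, not part of the commit:
private void assertNoShardFailures(SearchResponse searchResponse) {
    if (searchResponse.failedShards() > 0) {
        logger.warn("Failed shards:");
        for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
            logger.warn("-> {}", shardSearchFailure);
        }
    }
    assertThat(searchResponse.failedShards(), equalTo(0));
}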
@Test public void simpleChildQueryWithFlush() throws Exception {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// ignore
}
client.admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
// index simple data with flushes, so we have many segments
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
// TOP CHILDREN QUERY
SearchResponse searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD QUERY
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD FILTER
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
}
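
// Editor's note: this variant indexes the same data as simpleChildQuery but
// flushes after every operation, so parents and children land in separate
// Lucene segments. As the indexing comment above suggests, the intent is
// presumably to verify that parent/child resolution works across segment
// boundaries, not just within a single segment.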
@Test public void simpleChildQueryWithFlushAnd3Shards() throws Exception {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// ignore
}
client.admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
// index simple data with flushes, so we have many segments
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").execute().actionGet();
client.admin().indices().prepareFlush().execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
// TOP CHILDREN QUERY
SearchResponse searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD QUERY
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
// HAS CHILD FILTER
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p2"));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "red")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
}
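
// Editor's note: with index.number_of_shards set to 3, this variant also
// depends on each child being routed to the same shard as its parent (with
// setParent supplying the routing value), since child queries resolve parents
// per shard. That routing behavior is my reading of the commit, not something
// this test asserts directly.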
@Test public void testScopedFacet() throws Exception {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// ignore
}
client.admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
// index simple data
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").execute().actionGet();
client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();
client.prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").execute().actionGet();
client.prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").execute().actionGet();
client.prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").execute().actionGet();
client.prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
SearchResponse searchResponse = client.prepareSearch("test")
.setQuery(topChildrenQuery("child", boolQuery().should(termQuery("c_field", "red")).should(termQuery("c_field", "yellow"))).scope("child1"))
.addFacet(termsFacet("facet1").field("c_field").scope("child1"))
.execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
assertThat(searchResponse.facets().facets().size(), equalTo(1));
TermsFacet termsFacet = searchResponse.facets().facet("facet1");
assertThat(termsFacet.entries().size(), equalTo(2));
assertThat(termsFacet.entries().get(0).term(), equalTo("red"));
assertThat(termsFacet.entries().get(0).count(), equalTo(2));
assertThat(termsFacet.entries().get(1).term(), equalTo("yellow"));
assertThat(termsFacet.entries().get(1).count(), equalTo(1));
}
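
// Editor's note: the query and the facet are tied together by the shared scope
// name "child1"; the facet is computed over the child documents matched inside
// that scope, not over the parent hits. That is why the counts come out as
// red=2 (children c1 and c4) and yellow=1 (child c2) while only two parent
// documents are returned. A minimal sketch of the pairing, reusing the
// builders exercised above:
SearchResponse scoped = client.prepareSearch("test")
.setQuery(topChildrenQuery("child", termQuery("c_field", "red")).scope("child1"))
.addFacet(termsFacet("facet1").field("c_field").scope("child1"))
.execute().actionGet();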
@Test public void testDeletedParent() throws Exception {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Exception e) {
// ignore
}
client.admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1)).execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
client.admin().indices().preparePutMapping("test").setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
// index simple data
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").execute().actionGet();
client.prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").execute().actionGet();
client.prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").execute().actionGet();
client.prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").execute().actionGet();
client.prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").execute().actionGet();
client.prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
// TOP CHILDREN QUERY
SearchResponse searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
assertThat(searchResponse.hits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
assertThat(searchResponse.hits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
// update p1 and check that we get the updated values...
client.prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1_updated").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
searchResponse = client.prepareSearch("test").setQuery(topChildrenQuery("child", termQuery("c_field", "yellow"))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
assertThat(searchResponse.hits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
searchResponse = client.prepareSearch("test").setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "yellow")))).execute().actionGet();
if (searchResponse.failedShards() > 0) {
logger.warn("Failed shards:");
for (ShardSearchFailure shardSearchFailure : searchResponse.shardFailures()) {
logger.warn("-> {}", shardSearchFailure);
}
}
assertThat(searchResponse.failedShards(), equalTo(0));
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("p1"));
assertThat(searchResponse.hits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
}
}
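
Despite its name, testDeletedParent never issues an explicit delete: it re-indexes p1 with new source, which at the Lucene level removes the old parent document and adds a new one. The assertions on "p_value1_updated" therefore verify that the child association survives a parent re-index, which is the delete-then-add case the test name alludes to.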


@ -69,12 +69,12 @@ public class SimpleQueryTests extends AbstractNodesTests {
client.admin().indices().prepareRefresh().execute().actionGet();
- SearchResponse searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), exists("field1"))).execute().actionGet();
+ SearchResponse searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("1"), equalTo("2")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("1"), equalTo("2")));
- searchResponse = client.prepareSearch().setQuery(constantScoreQuery(exists("field1"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(constantScoreQuery(existsFilter("field1"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("1"), equalTo("2")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("1"), equalTo("2")));
@ -84,27 +84,27 @@ public class SimpleQueryTests extends AbstractNodesTests {
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("1"), equalTo("2")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("1"), equalTo("2")));
- searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), exists("field2"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field2"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("1"), equalTo("3")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("1"), equalTo("3")));
- searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), exists("field3"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field3"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(1l));
assertThat(searchResponse.hits().getAt(0).id(), equalTo("4"));
- searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), missing("field1"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("3"), equalTo("4")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("3"), equalTo("4")));
// double check for cache
- searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), missing("field1"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingFilter("field1"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("3"), equalTo("4")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("3"), equalTo("4")));
- searchResponse = client.prepareSearch().setQuery(constantScoreQuery(missing("field1"))).execute().actionGet();
+ searchResponse = client.prepareSearch().setQuery(constantScoreQuery(missingFilter("field1"))).execute().actionGet();
assertThat(searchResponse.hits().totalHits(), equalTo(2l));
assertThat(searchResponse.hits().getAt(0).id(), anyOf(equalTo("3"), equalTo("4")));
assertThat(searchResponse.hits().getAt(1).id(), anyOf(equalTo("3"), equalTo("4")));
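
The hunks above are a mechanical rename of the FilterBuilders factory methods: exists(...) becomes existsFilter(...) and missing(...) becomes missingFilter(...), presumably to keep the static-import namespace unambiguous now that more builders share it. After the rename a typical call reads as follows (mirroring the updated lines above):

searchResponse = client.prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsFilter("field1"))).execute().actionGet();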