Fixed an issue where parent & child queries could fail if a segment doesn't have documents of the targeted type or the associated parent type
Closes #2537
This commit is contained in:
parent 43aabe88e8
commit 1ce10dfb06
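
The fix follows one pattern throughout: a segment's IdReaderTypeCache is null when that segment contains no documents of the targeted type (or its associated parent type), so every collector and filter must check the cache before dereferencing it. Below is a minimal, hypothetical sketch of that pattern, not the committed code; the setNextReader body and the simplified collector around it are assumptions for illustration, while typeCache, parentIdByDoc, and idCache().reader(...).type(...) come from the hunks that follow.

// Minimal sketch (not the committed code) of the null-guard pattern this
// commit applies in ChildCollector, ChildrenQuery, ParentQuery,
// HasChildFilter and HasParentFilter.
@Override
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
    // type(...) returns null when this segment has no documents of the
    // targeted type, e.g. right after index creation or when a segment
    // only holds documents of unrelated types.
    typeCache = context.idCache().reader(readerContext.reader()).type(parentType);
}

@Override
public void collect(int doc) throws IOException {
    if (typeCache == null) {
        return; // nothing of the relevant type in this segment; previously an NPE
    }
    HashedBytesArray parentUid = typeCache.parentIdByDoc(doc);
    // ... per-collector bookkeeping, as in the hunks below ...
}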
@@ -66,8 +66,8 @@ public class HasChildQueryBuilder extends BaseQueryBuilder implements BoostableQ
     /**
      * Defines how the scores from the matching child documents are mapped into the parent document.
      */
-    public HasChildQueryBuilder scoreType(String executionType) {
-        this.scoreType = executionType;
+    public HasChildQueryBuilder scoreType(String scoreType) {
+        this.scoreType = scoreType;
         return this;
     }
@@ -21,7 +21,6 @@ package org.elasticsearch.index.search.child;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.FixedBitSet;
@@ -70,11 +69,14 @@ public class ChildCollector extends Collector {
     @Override
     public void setScorer(Scorer scorer) throws IOException {
     }

     @Override
     public void collect(int doc) throws IOException {
+        if (typeCache == null) {
+            return;
+        }
+
         HashedBytesArray parentId = typeCache.parentIdByDoc(doc);
         if (parentId == null) {
             return;
@@ -321,6 +321,10 @@ public class ChildrenQuery extends Query implements ScopePhase.CollectorPhase {
     @Override
     public void collect(int doc) throws IOException {
+        if (typeCache == null) {
+            return;
+        }
+
         HashedBytesArray parentUid = typeCache.parentIdByDoc(doc);
         float previousScore = uidToScore.get(parentUid);
         float currentScore = scorer.score();
@@ -364,6 +368,10 @@ public class ChildrenQuery extends Query implements ScopePhase.CollectorPhase {
     @Override
     public void collect(int doc) throws IOException {
+        if (typeCache == null) {
+            return;
+        }
+
         HashedBytesArray parentUid = typeCache.parentIdByDoc(doc);
         float previousScore = uidToScore.get(parentUid);
         float currentScore = scorer.score();
@@ -198,7 +198,11 @@ public abstract class HasChildFilter extends Filter implements ScopePhase.Collec
     @Override
     public void collect(int doc) throws IOException {
-        collectedUids.add(typeCache.parentIdByDoc(doc));
+        // It can happen that a particular segment has no documents of a specific type. This prevents an NPE.
+        if (typeCache != null) {
+            collectedUids.add(typeCache.parentIdByDoc(doc));
+        }
+
     }

     @Override
@@ -163,7 +163,10 @@ public abstract class HasParentFilter extends Filter implements ScopePhase.Colle
     }

     public void collect(int doc) throws IOException {
-        collectedUids.add(typeCache.idByDoc(doc));
+        // It can happen that a particular segment has no documents of a specific type. This prevents an NPE.
+        if (typeCache != null) {
+            collectedUids.add(typeCache.idByDoc(doc));
+        }
     }

     @Override
@@ -199,7 +202,12 @@ public abstract class HasParentFilter extends Filter implements ScopePhase.Colle
             throw new ElasticSearchIllegalStateException("has_parent filter hasn't executed properly");
         }

-        return new ChildrenDocSet(readerContext.reader(), acceptDocs, parentDocs, context, parentType);
+        IdReaderTypeCache currentTypeCache = context.idCache().reader(readerContext.reader()).type(parentType);
+        if (currentTypeCache == null) {
+            return null;
+        } else {
+            return new ChildrenDocSet(readerContext.reader(), currentTypeCache, acceptDocs, parentDocs, context, parentType);
+        }
     }

     public void clear() {
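
Returning null from getDocIdSet is the standard Lucene signal for "no documents can match in this segment", which lets the engine skip the segment outright instead of iterating an empty set. A hedged sketch of how such a null-returning filter is typically consumed (illustrative only, not part of this commit):

// Illustrative consumer of a Filter whose getDocIdSet may return null.
DocIdSet docIdSet = filter.getDocIdSet(readerContext, acceptDocs);
if (docIdSet == null) {
    return null; // no matches possible in this segment; skip it
}
DocIdSetIterator iterator = docIdSet.iterator();
if (iterator == null) {
    return null; // a DocIdSet may also legally yield no iterator
}
// ... advance the iterator over matching docs ...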
@@ -213,10 +221,10 @@ public abstract class HasParentFilter extends Filter implements ScopePhase.Colle
         final Tuple<AtomicReader, IdReaderTypeCache>[] readersToTypeCache;
         final Map<Object, FixedBitSet> parentDocs;

-        ChildrenDocSet(AtomicReader currentReader, @Nullable Bits acceptDocs, Map<Object, FixedBitSet> parentDocs,
-                       SearchContext context, String parentType) {
+        ChildrenDocSet(AtomicReader currentReader, IdReaderTypeCache currentTypeCache, @Nullable Bits acceptDocs,
+                       Map<Object, FixedBitSet> parentDocs, SearchContext context, String parentType) {
             super(currentReader.maxDoc(), acceptDocs);
-            this.currentTypeCache = context.idCache().reader(currentReader).type(parentType);
+            this.currentTypeCache = currentTypeCache;
             this.currentReader = currentReader;
             this.parentDocs = parentDocs;
             this.readersToTypeCache = new Tuple[context.searcher().getIndexReader().leaves().size()];
@@ -160,6 +160,10 @@ public class ParentQuery extends Query implements ScopePhase.CollectorPhase {
     @Override
     public void collect(int doc) throws IOException {
+        if (typeCache == null) {
+            return;
+        }
+
         HashedBytesArray parentUid = typeCache.idByDoc(doc);
         uidToScore.put(parentUid, scorer.score());
     }
@@ -1077,4 +1077,65 @@ public class SimpleChildQuerySearchTests extends AbstractNodesTests {
         assertThat(response.hits().hits()[6].score(), equalTo(5f));
     }

+    @Test
+    // https://github.com/elasticsearch/elasticsearch/issues/2536
+    public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Exception {
+        client.admin().indices().prepareDelete().execute().actionGet();
+
+        client.admin().indices().prepareCreate("test")
+                .addMapping("parent", jsonBuilder()
+                        .startObject()
+                        .startObject("parent")
+                        .endObject()
+                        .endObject()
+                ).addMapping("child", jsonBuilder()
+                        .startObject()
+                        .startObject("child")
+                        .startObject("_parent")
+                        .field("type", "parent")
+                        .endObject()
+                        .endObject()
+                        .endObject()
+                ).setSettings(
+                        ImmutableSettings.settingsBuilder()
+                                .put("index.number_of_shards", 1)
+                                .put("index.number_of_replicas", 0)
+                ).execute().actionGet();
+        client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+        SearchResponse response = client.prepareSearch("test")
+                .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")))
+                .execute().actionGet();
+        assertThat(response.failedShards(), equalTo(0));
+        assertThat(response.hits().totalHits(), equalTo(0l));
+
+        client.prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject())
+                .setRefresh(true)
+                .execute().actionGet();
+
+        response = client.prepareSearch("test")
+                .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")).executionType(getExecutionMethod()))
+                .execute().actionGet();
+        assertThat(response.failedShards(), equalTo(0));
+        assertThat(response.hits().totalHits(), equalTo(0l));
+
+        response = client.prepareSearch("test")
+                .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")).scoreType("max"))
+                .execute().actionGet();
+        assertThat(response.failedShards(), equalTo(0));
+        assertThat(response.hits().totalHits(), equalTo(0l));
+
+        response = client.prepareSearch("test")
+                .setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).executionType(getExecutionMethod()))
+                .execute().actionGet();
+        assertThat(response.failedShards(), equalTo(0));
+        assertThat(response.hits().totalHits(), equalTo(0l));
+
+        response = client.prepareSearch("test")
+                .setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).scoreType("score"))
+                .execute().actionGet();
+        assertThat(response.failedShards(), equalTo(0));
+        assertThat(response.hits().totalHits(), equalTo(0l));
+    }
+
 }