diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 77228d16fa1..5ace3cb7a40 100644
--- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -140,6 +140,10 @@ public class MapperService extends AbstractIndexComponent implements Iterable<D
 
+    public boolean hasNested() {
+        return this.hasNested;
+    }
+
     @Override
     public UnmodifiableIterator<DocumentMapper> iterator() {
         return mappers.values().iterator();
diff --git a/src/main/java/org/elasticsearch/index/search/nested/IncludeAllChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/nested/IncludeAllChildrenQuery.java
new file mode 100644
index 00000000000..85e3f38787f
--- /dev/null
+++ b/src/main/java/org/elasticsearch/index/search/nested/IncludeAllChildrenQuery.java
@@ -0,0 +1,274 @@
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.docset.FixedBitDocSet;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A special query that accepts a top level parent matching query, and returns all the children of that parent as
+ * well. This is handy when deleting by query.
+ */
+public class IncludeAllChildrenQuery extends Query {
+
+    private final Filter parentFilter;
+    private final Query parentQuery;
+
+    // If we are rewritten, this is the original parentQuery we
+    // were passed; we use this for .equals() and
+    // .hashCode(). This makes the rewritten query equal the
+    // original, so that the user does not have to .rewrite() their
+    // query before searching:
+    private final Query origParentQuery;
+
+    public IncludeAllChildrenQuery(Query parentQuery, Filter parentFilter) {
+        this.origParentQuery = parentQuery;
+        this.parentQuery = parentQuery;
+        this.parentFilter = parentFilter;
+    }
+
+    IncludeAllChildrenQuery(Query origParentQuery, Query parentQuery, Filter parentFilter) {
+        this.origParentQuery = origParentQuery;
+        this.parentQuery = parentQuery;
+        this.parentFilter = parentFilter;
+    }
+
+    @Override
+    public Weight createWeight(Searcher searcher) throws IOException {
+        return new IncludeAllChildrenWeight(parentQuery, parentQuery.createWeight(searcher), parentFilter);
+    }
+
+    static class IncludeAllChildrenWeight extends Weight {
+
+        private final Query parentQuery;
+        private final Weight parentWeight;
+        private final Filter parentsFilter;
+
+        IncludeAllChildrenWeight(Query parentQuery, Weight parentWeight, Filter parentsFilter) {
+            this.parentQuery = parentQuery;
+            this.parentWeight = parentWeight;
+            this.parentsFilter = parentsFilter;
+        }
+
+        @Override
+        public Query getQuery() {
+            return parentQuery;
+        }
+
+        @Override
+        public float getValue() {
+            return parentWeight.getValue();
+        }
+
+        @Override
+        public float sumOfSquaredWeights() throws IOException {
+            return parentWeight.sumOfSquaredWeights() * parentQuery.getBoost() * parentQuery.getBoost();
+        }
+
+        @Override
+        public void normalize(float norm) {
+            parentWeight.normalize(norm * parentQuery.getBoost());
+        }
+
+        @Override
+        public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
+            final Scorer parentScorer = parentWeight.scorer(reader, true, false);
+
+            // no matches
+            if (parentScorer == null) {
+                return null;
+            }
+
+            final int firstParentDoc = parentScorer.nextDoc();
+            if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
+                // No matches
+                return null;
+            }
+
+            DocIdSet parents = parentsFilter.getDocIdSet(reader);
+            if (parents == null) {
+                // No matches
+                return null;
+            }
+            if (parents instanceof FixedBitDocSet) {
+                parents = ((FixedBitDocSet) parents).set();
+            }
+            if (!(parents instanceof FixedBitSet)) {
+                throw new IllegalStateException("parentFilter must return FixedBitSet; got " + parents);
+            }
+
+            return new IncludeAllChildrenScorer(this, parentScorer, (FixedBitSet) parents, firstParentDoc);
+        }
+
+        @Override
+        public Explanation explain(IndexReader reader, int doc) throws IOException {
+            return null;
+        }
+
+        @Override
+        public boolean scoresDocsOutOfOrder() {
+            return false;
+        }
+    }
+
+    static class IncludeAllChildrenScorer extends Scorer {
+
+        private final Scorer parentScorer;
+        private final FixedBitSet parentBits;
+
+        private int currentChildPointer = -1;
+        private int currentParentPointer = -1;
+
+        private int currentDoc = -1;
+
+        IncludeAllChildrenScorer(Weight weight, Scorer parentScorer, FixedBitSet parentBits, int currentParentPointer) {
+            super(weight);
+            this.parentScorer = parentScorer;
+            this.parentBits = parentBits;
+            this.currentParentPointer = currentParentPointer;
+            if (currentParentPointer == 0) {
+                currentChildPointer = 0;
+            } else {
+                this.currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+                if (currentChildPointer == -1) {
+                    // no previous set parent, we delete from doc 0
+                    currentChildPointer = 0;
+                } else {
+                    currentChildPointer++; // we only care about children
+                }
+            }
+
+            currentDoc = currentChildPointer;
+        }
+
+        @Override
+        protected void visitSubScorers(Query parent, BooleanClause.Occur relationship, ScorerVisitor<Query, Query, Scorer> visitor) {
+            super.visitSubScorers(parent, relationship, visitor);
+            parentScorer.visitScorers(visitor);
+        }
+
+        @Override
+        public int nextDoc() throws IOException {
+            if (currentParentPointer == NO_MORE_DOCS) {
+                return (currentDoc = NO_MORE_DOCS);
+            }
+
+            if (currentChildPointer == currentParentPointer) {
+                // we need to return the current parent as well, but prepare to return
+                // the next set of children
+                currentDoc = currentParentPointer;
+                currentParentPointer = parentScorer.nextDoc();
+                if (currentParentPointer != NO_MORE_DOCS) {
+                    currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+                    if (currentChildPointer == -1) {
+                        // no previous set parent, just set the child to the current parent
+                        currentChildPointer = currentParentPointer;
+                    } else {
+                        currentChildPointer++; // we only care about children
+                    }
+                }
+            } else {
+                currentDoc = currentChildPointer++;
+            }
+
+            assert currentDoc != -1;
+            return currentDoc;
+        }
+
+        @Override
+        public int advance(int target) throws IOException {
+            if (target == NO_MORE_DOCS) {
+                return (currentDoc = NO_MORE_DOCS);
+            }
+
+            if (target == 0) {
+                return nextDoc();
+            }
+
+            currentParentPointer = parentScorer.advance(target);
+            if (currentParentPointer == NO_MORE_DOCS) {
+                return (currentDoc = NO_MORE_DOCS);
+            }
+            if (currentParentPointer == 0) {
+                currentChildPointer = 0;
+            } else {
+                currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
+                if (currentChildPointer == -1) {
+                    // no previous set parent, just set the child to 0 to delete all up to the parent
+                    currentChildPointer = 0;
+                } else {
+                    currentChildPointer++; // we only care about children
+                }
+            }
+
+            currentDoc = currentChildPointer;
+
+            return currentDoc;
+        }
+
+        @Override
+        public float score() throws IOException {
+            return parentScorer.score();
+        }
+
+        @Override
+        public int docID() {
+            return currentDoc;
+        }
+    }
+
+    @Override
+    public void extractTerms(Set<Term> terms) {
+        parentQuery.extractTerms(terms);
+    }
+
+    @Override
+    public Query rewrite(IndexReader reader) throws IOException {
+        final Query parentRewrite = parentQuery.rewrite(reader);
+        if (parentRewrite != parentQuery) {
+            Query rewritten = new IncludeAllChildrenQuery(parentQuery, parentRewrite, parentFilter);
+            rewritten.setBoost(getBoost());
+            return rewritten;
+        } else {
+            return this;
+        }
+    }
+
+    @Override
+    public String toString(String field) {
+        return "IncludeAllChildrenQuery (" + parentQuery.toString() + ")";
+    }
+
+    @Override
+    public boolean equals(Object _other) {
+        if (_other instanceof IncludeAllChildrenQuery) {
+            final IncludeAllChildrenQuery other = (IncludeAllChildrenQuery) _other;
+            return origParentQuery.equals(other.origParentQuery) &&
+                    parentFilter.equals(other.parentFilter);
+        } else {
+            return false;
+        }
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int hash = 1;
+        hash = prime * hash + origParentQuery.hashCode();
+        hash = prime * hash + parentFilter.hashCode();
+        return hash;
+    }
+
+    @Override
+    public Object clone() {
+        return new IncludeAllChildrenQuery((Query) origParentQuery.clone(),
+                parentFilter);
+    }
+}
diff --git a/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java b/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
index 7a82e0d0673..ecca0b93068 100644
--- a/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
+++ b/src/main/java/org/elasticsearch/index/shard/service/InternalIndexShard.java
@@ -52,6 +52,8 @@ import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
 import org.elasticsearch.index.query.IndexQueryParserService;
 import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.nested.IncludeAllChildrenQuery;
+import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.index.search.stats.SearchStats;
 import org.elasticsearch.index.search.stats.ShardSearchService;
 import org.elasticsearch.index.settings.IndexSettings;
@@ -359,6 +361,11 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
     @Override
     public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticSearchException {
         writeAllowed();
+        if (mapperService.hasNested()) {
+            // we need to wrap it to delete nested docs as well...
+            IncludeAllChildrenQuery nestedQuery = new IncludeAllChildrenQuery(deleteByQuery.query(), indexCache.filter().cache(NonNestedDocsFilter.INSTANCE));
+            deleteByQuery = new Engine.DeleteByQuery(nestedQuery, deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.aliasFilter(), deleteByQuery.types());
+        }
         if (logger.isTraceEnabled()) {
             logger.trace("delete_by_query [{}]", deleteByQuery.query());
         }
diff --git a/src/test/java/org/elasticsearch/test/integration/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/test/integration/nested/SimpleNestedTests.java
index d3a0c76cdd8..499700e4e75 100644
--- a/src/test/java/org/elasticsearch/test/integration/nested/SimpleNestedTests.java
+++ b/src/test/java/org/elasticsearch/test/integration/nested/SimpleNestedTests.java
@@ -25,7 +25,7 @@ import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.facet.FacetBuilders;
 import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
 import org.elasticsearch.test.integration.AbstractNodesTests;
@@ -35,6 +35,7 @@ import org.testng.annotations.Test;
 
 import java.util.Arrays;
 
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.FilterBuilders.nestedFilter;
 import static org.elasticsearch.index.query.QueryBuilders.*;
@@ -178,6 +179,118 @@ public class SimpleNestedTests extends AbstractNodesTests {
         assertThat(searchResponse.hits().totalHits(), equalTo(1l));
     }
 
+    @Test
+    public void simpleNestedDeletedByQuery1() throws Exception {
+        simpleNestedDeleteByQuery(3, 0);
+    }
+
+    @Test
+    public void simpleNestedDeletedByQuery2() throws Exception {
+        simpleNestedDeleteByQuery(3, 1);
+    }
+
+    @Test
+    public void simpleNestedDeletedByQuery3() throws Exception {
+        simpleNestedDeleteByQuery(3, 2);
+    }
+
+    private void simpleNestedDeleteByQuery(int total, int docToDelete) throws Exception {
+        client.admin().indices().prepareDelete().execute().actionGet();
+
+        client.admin().indices().prepareCreate("test")
+                .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                        .startObject("nested1")
+                        .field("type", "nested")
+                        .endObject()
+                        .endObject().endObject().endObject())
+                .execute().actionGet();
+
+        client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+        for (int i = 0; i < total; i++) {
+            client.prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+                    .field("field1", "value1")
+                    .startArray("nested1")
+                    .startObject()
+                    .field("n_field1", "n_value1_1")
+                    .field("n_field2", "n_value2_1")
+                    .endObject()
+                    .startObject()
+                    .field("n_field1", "n_value1_2")
+                    .field("n_field2", "n_value2_2")
+                    .endObject()
+                    .endArray()
+                    .endObject()).execute().actionGet();
+        }
+
+        client.admin().indices().prepareFlush().setRefresh(true).execute().actionGet();
+        IndicesStatusResponse statusResponse = client.admin().indices().prepareStatus().execute().actionGet();
+        assertThat(statusResponse.index("test").docs().numDocs(), equalTo(total * 3));
+
+        client.prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+        client.admin().indices().prepareFlush().setRefresh(true).execute().actionGet();
+        statusResponse = client.admin().indices().prepareStatus().execute().actionGet();
+        assertThat(statusResponse.index("test").docs().numDocs(), equalTo((total * 3) - 3));
+
+        for (int i = 0; i < total; i++) {
+            assertThat(client.prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().exists(), equalTo(i != docToDelete));
+        }
+    }
+
+    @Test
+    public void noChildrenNestedDeletedByQuery1() throws Exception {
+        noChildrenNestedDeleteByQuery(3, 0);
+    }
+
+    @Test
+    public void noChildrenNestedDeletedByQuery2() throws Exception {
+        noChildrenNestedDeleteByQuery(3, 1);
+    }
+
+    @Test
+    public void noChildrenNestedDeletedByQuery3() throws Exception {
+        noChildrenNestedDeleteByQuery(3, 2);
+    }
+
+    private void noChildrenNestedDeleteByQuery(int total, int docToDelete) throws Exception {
+        client.admin().indices().prepareDelete().execute().actionGet();
+
+        client.admin().indices().prepareCreate("test")
+                .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.refresh_interval", -1).build())
+                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+                        .startObject("nested1")
+                        .field("type", "nested")
+                        .endObject()
+                        .endObject().endObject().endObject())
+                .execute().actionGet();
+
+        client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+        for (int i = 0; i < total; i++) {
+            client.prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+                    .field("field1", "value1")
+                    .endObject()).execute().actionGet();
+        }
+
+        client.admin().indices().prepareFlush().setRefresh(true).execute().actionGet();
+        IndicesStatusResponse statusResponse = client.admin().indices().prepareStatus().execute().actionGet();
+        assertThat(statusResponse.index("test").docs().numDocs(), equalTo(total));
+
+        client.prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet();
+        client.admin().indices().prepareFlush().setRefresh(true).execute().actionGet();
+        statusResponse = client.admin().indices().prepareStatus().execute().actionGet();
+        assertThat(statusResponse.index("test").docs().numDocs(), equalTo((total) - 1));
+
+        for (int i = 0; i < total; i++) {
+            assertThat(client.prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().exists(), equalTo(i != docToDelete));
+        }
+    }
+
     @Test
     public void multiNested() throws Exception {
         client.admin().indices().prepareDelete().execute().actionGet();
@@ -267,7 +380,7 @@ public class SimpleNestedTests extends AbstractNodesTests {
         client.admin().indices().prepareDelete().execute().actionGet();
 
         client.admin().indices().prepareCreate("test")
-                .setSettings(ImmutableSettings.settingsBuilder().put("number_of_shards", numberOfShards))
+                .setSettings(settingsBuilder().put("number_of_shards", numberOfShards))
                 .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                         .startObject("nested1")
                         .field("type", "nested").startObject("properties")
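
Editor's note on the scorer logic: the key step in IncludeAllChildrenScorer is mapping a matching parent doc back to the first doc of its block via prevSetBit on the parent filter's bitset (nested children are indexed immediately before their parent). Below is a minimal, self-contained sketch of that arithmetic only; it is not part of the patch. It uses java.util.BitSet in place of Lucene's FixedBitSet, and the class and method names (NestedBlockDemo, expandToChildren) plus the doc-id layout in main() are hypothetical.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

// Standalone sketch of the doc-id arithmetic used when expanding a matching
// parent into its nested children (children first, then the parent itself).
public class NestedBlockDemo {

    // For each matching parent P, the children start at previousSetBit(P - 1) + 1,
    // or at doc 0 if there is no earlier parent in the segment.
    static List<Integer> expandToChildren(BitSet parentBits, int[] matchingParents) {
        List<Integer> docs = new ArrayList<Integer>();
        for (int parent : matchingParents) {
            int prevParent = parent == 0 ? -1 : parentBits.previousSetBit(parent - 1);
            int firstChild = prevParent + 1; // doc right after the previous parent (or 0)
            for (int doc = firstChild; doc <= parent; doc++) {
                docs.add(doc); // children first, then the parent
            }
        }
        return docs;
    }

    public static void main(String[] args) {
        // Hypothetical segment: three documents, each with two nested children.
        // Layout: child, child, parent, child, child, parent, child, child, parent
        BitSet parentBits = new BitSet();
        parentBits.set(2);
        parentBits.set(5);
        parentBits.set(8);

        // A delete-by-query that matched the second parent (doc 5) must also
        // remove its nested children, docs 3 and 4.
        System.out.println(expandToChildren(parentBits, new int[]{5})); // prints [3, 4, 5]
    }
}

This mirrors what the wrapped query achieves in InternalIndexShard.deleteByQuery: when the index has nested mappings, the user's query is wrapped so the deleted doc-id set covers each matching parent's whole block, as the tests above verify by counting documents before and after the delete.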