Improved SearchContext.addReleasable.

For resources whose lifetime is effectively bound to the search context they
are attached to, it is convenient to let the search context schedule their
release.

This commit changes aggregations to use this mechanism and also introduces
a `Lifetime` enum that defines how long a registered object should live:
 - COLLECTION: the object only needs to live while documents are being
   collected; this is what SearchContext.addReleasable assumed before this
   change (used for parent/child queries),
 - PHASE: the resource only needs to live until the end of the current search
   phase (DFS, QUERY or FETCH),
 - CONTEXT: the resource needs to live until the search context it is
   attached to is destroyed.

Aggregators are currently registered with CONTEXT. The reason is that, when
using the DFS_QUERY_THEN_FETCH search type, they are allocated during the DFS
phase but only used during the QUERY phase. We should eventually allocate them
during the QUERY phase only, so that they can be registered with PHASE as
their lifetime.

Close #5703
Adrien Grand 2014-04-11 11:18:49 +02:00
parent e589301806
commit e458d4fd93
24 changed files with 156 additions and 250 deletions
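
As context for the diff below, here is a minimal, self-contained sketch of the mechanism this commit introduces. The LifetimeSketch class is hypothetical (the real implementation in SearchContext further down relies on a Guava Multimap keyed by the enum), but the clearing loop mirrors the committed code:

import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class LifetimeSketch {

    // Stand-in for org.elasticsearch.common.lease.Releasable.
    public interface Releasable extends AutoCloseable {
        @Override
        void close(); // narrowed: no checked exception
    }

    // Ordered from shortest- to longest-lived; clearReleasables relies on this order.
    public enum Lifetime { COLLECTION, PHASE, CONTEXT }

    private final Map<Lifetime, List<Releasable>> clearables = new EnumMap<>(Lifetime.class);

    // Mirrors SearchContext.addReleasable(Releasable, Lifetime).
    public void addReleasable(Releasable releasable, Lifetime lifetime) {
        clearables.computeIfAbsent(lifetime, k -> new ArrayList<>()).add(releasable);
    }

    // Mirrors SearchContext.clearReleasables(Lifetime): clearing a given lifetime
    // also clears every shorter one, so clearReleasables(PHASE) releases both
    // COLLECTION- and PHASE-scoped resources and keeps CONTEXT-scoped ones.
    public void clearReleasables(Lifetime lifetime) {
        for (Lifetime lc : Lifetime.values()) {
            if (lc.compareTo(lifetime) > 0) {
                break; // longer-lived than requested: keep it
            }
            List<Releasable> releasables = clearables.remove(lc);
            if (releasables != null) {
                releasables.forEach(Releasable::close);
            }
        }
    }
}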

View File

@ -124,9 +124,9 @@ public class TransportShardDeleteByQueryAction extends TransportShardReplication
SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query(), ImmutableMap.<String, Filter>of()));
indexShard.deleteByQuery(deleteByQuery);
} finally {
SearchContext searchContext = SearchContext.current();
searchContext.clearAndRelease();
SearchContext.removeCurrent();
try (SearchContext searchContext = SearchContext.current()) {
SearchContext.removeCurrent();
}
}
return new PrimaryResponse<>(shardRequest.request, new ShardDeleteByQueryResponse(), null);
}
@ -147,9 +147,9 @@ public class TransportShardDeleteByQueryAction extends TransportShardReplication
SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query(), ImmutableMap.<String, Filter>of()));
indexShard.deleteByQuery(deleteByQuery);
} finally {
SearchContext searchContext = SearchContext.current();
searchContext.clearAndRelease();
SearchContext.removeCurrent();
try (SearchContext searchContext = SearchContext.current()) {
SearchContext.removeCurrent();
}
}
}

View File

@ -41,6 +41,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.Set;
@ -123,7 +124,7 @@ public class ChildrenConstantScoreQuery extends Query {
shortCircuitFilter = new ParentIdsFilter(parentType, nonNestedDocsFilter, parentIds);
}
final ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, parentIds);
searchContext.addReleasable(parentWeight);
searchContext.addReleasable(parentWeight, Lifetime.COLLECTION);
releaseParentIds = false;
return parentWeight;
} finally {

View File

@ -41,6 +41,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.Arrays;
@ -219,7 +220,7 @@ public class ChildrenQuery extends Query {
parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
}
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentFilter, size, parentIds, scores, occurrences);
searchContext.addReleasable(parentWeight);
searchContext.addReleasable(parentWeight, Lifetime.COLLECTION);
return parentWeight;
}

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.common.lucene.search.NoCacheFilter;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.IdentityHashMap;
@ -66,7 +67,7 @@ public class CustomQueryWrappingFilter extends NoCacheFilter implements Releasab
IndexSearcher searcher = searchContext.searcher();
docIdSets = new IdentityHashMap<>();
this.searcher = searcher;
searchContext.addReleasable(this);
searchContext.addReleasable(this, Lifetime.COLLECTION);
final Weight weight = searcher.createNormalizedWeight(query);
for (final AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {

View File

@ -36,6 +36,7 @@ import org.elasticsearch.index.fielddata.BytesValues;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.Set;
@ -104,7 +105,7 @@ public class ParentConstantScoreQuery extends Query {
}
final ChildrenWeight childrenWeight = new ChildrenWeight(childrenFilter, parentIds);
searchContext.addReleasable(childrenWeight);
searchContext.addReleasable(childrenWeight, Lifetime.COLLECTION);
releaseParentIds = false;
return childrenWeight;
} finally {

View File

@ -40,6 +40,7 @@ import org.elasticsearch.index.fielddata.BytesValues;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.Set;
@ -156,7 +157,7 @@ public class ParentQuery extends Query {
Releasables.close(collector.parentIds, collector.scores);
}
}
searchContext.addReleasable(childWeight);
searchContext.addReleasable(childWeight, Lifetime.COLLECTION);
return childWeight;
}

View File

@ -38,6 +38,7 @@ import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.Arrays;
@ -168,7 +169,7 @@ public class TopChildrenQuery extends Query {
}
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentDocs);
searchContext.addReleasable(parentWeight);
searchContext.addReleasable(parentWeight, Lifetime.COLLECTION);
return parentWeight;
}

View File

@ -23,7 +23,6 @@ import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.*;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.percolate.PercolateShardRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cache.recycler.CacheRecycler;
@ -208,7 +207,7 @@ public class PercolateContext extends SearchContext {
}
@Override
public void close() throws ElasticsearchException {
protected void doClose() {
try (Releasable releasable = Releasables.wrap(engineSearcher, docSearcher)) {
if (docSearcher != null) {
IndexReader indexReader = docSearcher.reader();
@ -291,11 +290,6 @@ public class PercolateContext extends SearchContext {
}
// Unused:
@Override
public void clearAndRelease() {
throw new UnsupportedOperationException();
}
@Override
public void preProcess() {
throw new UnsupportedOperationException();
@ -675,16 +669,6 @@ public class PercolateContext extends SearchContext {
throw new UnsupportedOperationException();
}
@Override
public void addReleasable(Releasable releasable) {
throw new UnsupportedOperationException();
}
@Override
public void clearReleasables() {
throw new UnsupportedOperationException();
}
@Override
public ScanContext scanContext() {
throw new UnsupportedOperationException();

View File

@ -69,10 +69,8 @@ import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.search.dfs.DfsPhase;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.*;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.internal.*;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.query.*;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
@ -575,6 +573,8 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
}
private void cleanContext(SearchContext context) {
assert context == SearchContext.current();
context.clearReleasables(Lifetime.PHASE);
SearchContext.removeCurrent();
}
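
For context, a simplified sketch of where this hook fires; the method body below is an illustration of the pattern (the real executeQueryPhase in SearchService also handles DFS results and failures), not a verbatim copy:

// Every phase execution ends by clearing PHASE-scoped releasables, so a
// resource registered with Lifetime.PHASE (or COLLECTION) never outlives
// the phase, while CONTEXT-scoped resources survive for follow-up phases.
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException {
    final SearchContext context = findContext(request.id());
    try {
        queryPhase.execute(context);
        return context.queryResult();
    } finally {
        cleanContext(context); // clearReleasables(Lifetime.PHASE) + removeCurrent()
    }
}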

View File

@ -107,37 +107,34 @@ public class AggregationPhase implements SearchPhase {
}
Aggregator[] aggregators = context.aggregations().aggregators();
try (Releasable releasable = Releasables.wrap(aggregators)) {
List<Aggregator> globals = new ArrayList<>();
for (int i = 0; i < aggregators.length; i++) {
if (aggregators[i] instanceof GlobalAggregator) {
globals.add(aggregators[i]);
}
List<Aggregator> globals = new ArrayList<>();
for (int i = 0; i < aggregators.length; i++) {
if (aggregators[i] instanceof GlobalAggregator) {
globals.add(aggregators[i]);
}
// optimize the global collector based execution
if (!globals.isEmpty()) {
AggregationsCollector collector = new AggregationsCollector(globals, context.aggregations().aggregationContext());
Query query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
Filter searchFilter = context.searchFilter(context.types());
if (searchFilter != null) {
query = new XFilteredQuery(query, searchFilter);
}
try {
context.searcher().search(query, collector);
} catch (Exception e) {
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
}
collector.postCollection();
}
List<InternalAggregation> aggregations = new ArrayList<>(aggregators.length);
for (Aggregator aggregator : context.aggregations().aggregators()) {
aggregations.add(aggregator.buildAggregation(0));
}
context.queryResult().aggregations(new InternalAggregations(aggregations));
}
// optimize the global collector based execution
if (!globals.isEmpty()) {
AggregationsCollector collector = new AggregationsCollector(globals, context.aggregations().aggregationContext());
Query query = new XConstantScoreQuery(Queries.MATCH_ALL_FILTER);
Filter searchFilter = context.searchFilter(context.types());
if (searchFilter != null) {
query = new XFilteredQuery(query, searchFilter);
}
try {
context.searcher().search(query, collector);
} catch (Exception e) {
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
}
collector.postCollection();
}
List<InternalAggregation> aggregations = new ArrayList<>(aggregators.length);
for (Aggregator aggregator : context.aggregations().aggregators()) {
aggregations.add(aggregator.buildAggregation(0));
}
context.queryResult().aggregations(new InternalAggregations(aggregations));
}

View File

@ -19,12 +19,12 @@
package org.elasticsearch.search.aggregations;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.ReaderContextAware;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.ArrayList;
@ -84,6 +84,9 @@ public abstract class Aggregator implements Releasable, ReaderContextAware {
assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead";
this.factories = factories;
this.subAggregators = factories.createSubAggregators(this, estimatedBucketsCount);
// TODO: change this to Lifetime.PHASE, but that would imply allocating the aggregators in the QUERY
// phase instead of the DFS phase as is done today
context.searchContext().addReleasable(this, Lifetime.CONTEXT);
}
/**
@ -175,9 +178,7 @@ public abstract class Aggregator implements Releasable, ReaderContextAware {
/** Called upon release of the aggregator. */
@Override
public void close() {
try (Releasable releasable = Releasables.wrap(subAggregators)) {
doClose();
}
doClose();
}
/** Release instance-specific data. */

View File

@ -18,8 +18,6 @@
*/
package org.elasticsearch.search.aggregations;
import com.google.common.collect.Iterables;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.ObjectArray;
@ -28,8 +26,6 @@ import org.elasticsearch.search.aggregations.support.AggregationContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
@ -82,9 +78,6 @@ public class AggregatorFactories {
long arraySize = estimatedBucketsCount > 0 ? estimatedBucketsCount : 1;
aggregators = bigArrays.newObjectArray(arraySize);
aggregators.set(0, first);
for (long i = 1; i < arraySize; ++i) {
aggregators.set(i, createAndRegisterContextAware(parent.context(), factory, parent, estimatedBucketsCount));
}
}
@Override
@ -135,29 +128,7 @@ public class AggregatorFactories {
@Override
public void doClose() {
final Iterable<Aggregator> aggregatorsIter = new Iterable<Aggregator>() {
@Override
public Iterator<Aggregator> iterator() {
return new UnmodifiableIterator<Aggregator>() {
long i = 0;
@Override
public boolean hasNext() {
return i < aggregators.size();
}
@Override
public Aggregator next() {
return aggregators.get(i++);
}
};
}
};
Releasables.close(Iterables.concat(aggregatorsIter, Collections.singleton(aggregators)));
Releasables.close(aggregators);
}
};
}

View File

@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.List;
@ -97,7 +98,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase {
} catch (IOException e) {
// ignore
} finally {
SearchContext.current().clearReleasables();
SearchContext.current().clearReleasables(Lifetime.COLLECTION);
}
}
}

View File

@ -21,6 +21,8 @@ package org.elasticsearch.search.internal;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.*;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.MinimumScoreCollector;
import org.elasticsearch.common.lucene.MultiCollector;
import org.elasticsearch.common.lucene.search.FilteredCollector;
@ -28,6 +30,7 @@ import org.elasticsearch.common.lucene.search.XCollector;
import org.elasticsearch.common.lucene.search.XFilteredQuery;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
import java.util.ArrayList;
@ -36,7 +39,7 @@ import java.util.List;
/**
* Context-aware extension of {@link IndexSearcher}.
*/
public class ContextIndexSearcher extends IndexSearcher {
public class ContextIndexSearcher extends IndexSearcher implements Releasable {
public static enum Stage {
NA,
@ -66,10 +69,9 @@ public class ContextIndexSearcher extends IndexSearcher {
setSimilarity(searcher.searcher().getSimilarity());
}
public void release() {
if (mainDocIdSetCollector != null) {
mainDocIdSetCollector.release();
}
@Override
public void close() {
Releasables.close(mainDocIdSetCollector);
}
public void dfSource(CachedDfSource dfSource) {
@ -129,7 +131,7 @@ public class ContextIndexSearcher extends IndexSearcher {
}
return in.createNormalizedWeight(query);
} catch (Throwable t) {
searchContext.clearReleasables();
searchContext.clearReleasables(Lifetime.COLLECTION);
throw new RuntimeException(t);
}
}
@ -187,7 +189,7 @@ public class ContextIndexSearcher extends IndexSearcher {
}
}
} finally {
searchContext.clearReleasables();
searchContext.clearReleasables(Lifetime.COLLECTION);
}
}
@ -200,7 +202,7 @@ public class ContextIndexSearcher extends IndexSearcher {
XFilteredQuery filteredQuery = new XFilteredQuery(query, searchContext.aliasFilter());
return super.explain(filteredQuery, doc);
} finally {
searchContext.clearReleasables();
searchContext.clearReleasables(Lifetime.COLLECTION);
}
}
}

View File

@ -30,7 +30,6 @@ import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.search.AndFilter;
import org.elasticsearch.common.lucene.search.Queries;
@ -177,8 +176,6 @@ public class DefaultSearchContext extends SearchContext {
private volatile long lastAccessTime = -1;
private List<Releasable> clearables = null;
private volatile boolean useSlowScroll;
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
@ -207,18 +204,12 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
public void close() throws ElasticsearchException {
public void doClose() throws ElasticsearchException {
if (scanContext != null) {
scanContext.clear();
}
// clear and scope phase we have
searcher.release();
engineSearcher.close();
}
public void clearAndRelease() {
clearReleasables();
close();
Releasables.close(searcher, engineSearcher);
}
/**
@ -677,25 +668,6 @@ public class DefaultSearchContext extends SearchContext {
return fetchResult;
}
@Override
public void addReleasable(Releasable releasable) {
if (clearables == null) {
clearables = new ArrayList<>();
}
clearables.add(releasable);
}
@Override
public void clearReleasables() {
if (clearables != null) {
try {
Releasables.close(clearables);
} finally {
clearables.clear();
}
}
}
public ScanContext scanContext() {
if (scanContext == null) {
scanContext = new ScanContext();

View File

@ -23,6 +23,7 @@ import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lucene.docset.ContextDocIdSet;
import org.elasticsearch.common.lucene.search.XCollector;
import org.elasticsearch.index.cache.docset.DocSetCache;
@ -33,7 +34,7 @@ import java.util.List;
/**
*/
public class DocIdSetCollector extends XCollector {
public class DocIdSetCollector extends XCollector implements Releasable {
private final DocSetCache docSetCache;
private final Collector collector;
@ -53,7 +54,7 @@ public class DocIdSetCollector extends XCollector {
return docSets;
}
public void release() {
public void close() {
for (ContextDocIdSet docSet : docSets) {
docSetCache.release(docSet);
}

View File

@ -18,6 +18,9 @@
*/
package org.elasticsearch.search.internal;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.collect.MultimapBuilder;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
@ -27,6 +30,7 @@ import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
@ -59,6 +63,8 @@ import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.scan.ScanContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
@ -81,7 +87,17 @@ public abstract class SearchContext implements Releasable {
return current.get();
}
public abstract void clearAndRelease();
private Multimap<Lifetime, Releasable> clearables = null;
public final void close() {
try {
clearReleasables(Lifetime.CONTEXT);
} finally {
doClose();
}
}
protected abstract void doClose();
/**
* Should be called before executing the main query and after all other parameters have been set.
@ -288,9 +304,29 @@ public abstract class SearchContext implements Releasable {
public abstract FetchSearchResult fetchResult();
public abstract void addReleasable(Releasable releasable);
/**
* Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object
* is a function of the provided {@link Lifetime}.
*/
public void addReleasable(Releasable releasable, Lifetime lifetime) {
if (clearables == null) {
clearables = MultimapBuilder.enumKeys(Lifetime.class).arrayListValues().build();
}
clearables.put(lifetime, releasable);
}
public abstract void clearReleasables();
public void clearReleasables(Lifetime lifetime) {
if (clearables != null) {
List<Collection<Releasable>> releasables = new ArrayList<>();
for (Lifetime lc : Lifetime.values()) {
if (lc.compareTo(lifetime) > 0) {
break;
}
releasables.add(clearables.removeAll(lc));
}
Releasables.close(Iterables.concat(releasables));
}
}
public abstract ScanContext scanContext();
@ -305,4 +341,22 @@ public abstract class SearchContext implements Releasable {
public abstract boolean useSlowScroll();
public abstract SearchContext useSlowScroll(boolean useSlowScroll);
/**
* The life time of an object that is used during search execution.
*/
public enum Lifetime {
/**
* This lifetime is for objects that only live during collection time.
*/
COLLECTION,
/**
* This lifetime is for objects that need to live until the end of the current search phase.
*/
PHASE,
/**
* This lifetime is for objects that need to live until the search context they are attached to is destroyed.
*/
CONTEXT;
}
}
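
To make the ordering concrete, here is a hypothetical usage example built on the LifetimeSketch class shown after the commit message; the printed order follows directly from the enum ordering that clearReleasables iterates over:

public class LifetimeDemo {
    public static void main(String[] args) {
        LifetimeSketch context = new LifetimeSketch();
        context.addReleasable(() -> System.out.println("collection resource released"),
                LifetimeSketch.Lifetime.COLLECTION);
        context.addReleasable(() -> System.out.println("context resource released"),
                LifetimeSketch.Lifetime.CONTEXT);

        // End of a search phase (what SearchService.cleanContext triggers):
        // prints "collection resource released", keeps the CONTEXT resource.
        context.clearReleasables(LifetimeSketch.Lifetime.PHASE);

        // Destroying the context (what SearchContext.close() triggers):
        // prints "context resource released".
        context.clearReleasables(LifetimeSketch.Lifetime.CONTEXT);
    }
}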

View File

@ -26,7 +26,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
@ -94,11 +93,6 @@ public class TestSearchContext extends SearchContext {
this.indexFieldDataService = null;
}
@Override
public void clearAndRelease() {
// no-op
}
@Override
public void preProcess() {
}
@ -556,14 +550,6 @@ public class TestSearchContext extends SearchContext {
return null;
}
@Override
public void addReleasable(Releasable releasable) {
}
@Override
public void clearReleasables() {
}
@Override
public ScanContext scanContext() {
return null;
@ -590,7 +576,7 @@ public class TestSearchContext extends SearchContext {
}
@Override
public void close() throws ElasticsearchException {
public void doClose() throws ElasticsearchException {
// no-op
}

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@ -33,7 +32,6 @@ import org.elasticsearch.search.aggregations.metrics.stats.Stats;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.cache.recycler.MockBigArrays;
import org.hamcrest.Matchers;
import org.junit.Test;
@ -217,7 +215,6 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_WithSubAggregation() throws Exception {
SearchResponse response = client().prepareSearch("idx").setTypes("type")
@ -433,7 +430,6 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void script_SingleValue() throws Exception {
SearchResponse response = client().prepareSearch("idx").setTypes("type")
@ -615,7 +611,6 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void emptyAggregation() throws Exception {
SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
@ -756,11 +751,8 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
assertThat(max.getValue(), equalTo(asc ? 4.0 : 2.0));
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -776,11 +768,8 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -797,11 +786,8 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -819,11 +805,8 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -841,7 +824,6 @@ public class DoubleTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
boolean asc = false;

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@ -32,7 +31,6 @@ import org.elasticsearch.search.aggregations.metrics.stats.Stats;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.cache.recycler.MockBigArrays;
import org.hamcrest.Matchers;
import org.junit.Test;
@ -513,7 +511,6 @@ public class LongTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
@ -753,11 +750,8 @@ public class LongTermsTests extends ElasticsearchIntegrationTest {
assertThat(max.getValue(), equalTo(asc ? 4.0 : 2.0));
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -773,11 +767,8 @@ public class LongTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -794,11 +785,8 @@ public class LongTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -816,11 +804,8 @@ public class LongTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@ -33,7 +32,6 @@ import org.elasticsearch.search.aggregations.metrics.max.Max;
import org.elasticsearch.search.aggregations.metrics.stats.Stats;
import org.elasticsearch.search.aggregations.metrics.sum.Sum;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.cache.recycler.MockBigArrays;
import org.hamcrest.Matchers;
import org.junit.Test;
@ -186,10 +184,8 @@ public class NestedTests extends ElasticsearchIntegrationTest {
assertThat(stats.getAvg(), equalTo((double) sum / count));
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void onNonNestedField() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx")
.addAggregation(nested("nested").path("value")

View File

@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket;
import com.google.common.base.Strings;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@ -27,12 +26,12 @@ import org.elasticsearch.index.query.FilterBuilders;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
import org.elasticsearch.search.aggregations.metrics.avg.Avg;
import org.elasticsearch.search.aggregations.metrics.stats.Stats;
import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.cache.recycler.MockBigArrays;
import org.hamcrest.Matchers;
import org.junit.Test;
@ -45,7 +44,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.FilterBuilders.termFilter;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
import static org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@ -943,11 +941,8 @@ public class StringTermsTests extends ElasticsearchIntegrationTest {
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -964,11 +959,8 @@ public class StringTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
@ -986,11 +978,8 @@ public class StringTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")
.addAggregation(terms("terms")
@ -1008,11 +997,8 @@ public class StringTermsTests extends ElasticsearchIntegrationTest {
}
}
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/5703")
@Test
public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
MockBigArrays.discardNextCheck();
try {
client().prepareSearch("idx").setTypes("type")

View File

@ -45,26 +45,11 @@ public class MockBigArrays extends BigArrays {
*/
private static final boolean TRACK_ALLOCATIONS = false;
private static boolean DISCARD = false;
private static ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>();
/**
* Discard the next check that all arrays have been released. This can be useful when, for a specific test, the cost of
* making sure the array is released is higher than the cost the user would experience if the array were not released.
*/
public static void discardNextCheck() {
DISCARD = true;
}
public static void ensureAllArraysAreReleased() throws Exception {
if (DISCARD) {
DISCARD = false;
} else {
final Map<Object, Object> masterCopy = Maps.newHashMap(ACQUIRED_ARRAYS);
if (masterCopy.isEmpty()) {
return;
}
final Map<Object, Object> masterCopy = Maps.newHashMap(ACQUIRED_ARRAYS);
if (!masterCopy.isEmpty()) {
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing arrays, so let's make sure that after a reasonable timeout, the whole master
// copy (snapshot) has been released
@ -74,14 +59,13 @@ public class MockBigArrays extends BigArrays {
return Sets.intersection(masterCopy.keySet(), ACQUIRED_ARRAYS.keySet()).isEmpty();
}
});
if (success) {
return;
}
masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
final Object cause = masterCopy.entrySet().iterator().next().getValue();
throw new RuntimeException(masterCopy.size() + " arrays have not been released", cause instanceof Throwable ? (Throwable) cause : null);
if (!success) {
masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
final Object cause = masterCopy.entrySet().iterator().next().getValue();
throw new RuntimeException(masterCopy.size() + " arrays have not been released", cause instanceof Throwable ? (Throwable) cause : null);
}
}
}
}

View File

@ -43,26 +43,24 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
public static void ensureAllPagesAreReleased() throws Exception {
final Map<Object, Throwable> masterCopy = Maps.newHashMap(ACQUIRED_PAGES);
if (masterCopy.isEmpty()) {
return;
}
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing pages, so let's make sure that after a reasonable timeout, the whole master
// copy (snapshot) has been released
boolean success = ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object input) {
return Sets.intersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()).isEmpty();
}
});
if (success) {
return;
}
masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
final Throwable t = masterCopy.entrySet().iterator().next().getValue();
throw new RuntimeException(masterCopy.size() + " pages have not been released", t);
// not empty, we might be executing on a shared cluster that keeps on obtaining
// and releasing pages, so let's make sure that after a reasonable timeout, the whole master
// copy (snapshot) has been released
boolean success = ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object input) {
return Sets.intersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()).isEmpty();
}
});
if (!success) {
masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on
if (!masterCopy.isEmpty()) {
final Throwable t = masterCopy.entrySet().iterator().next().getValue();
throw new RuntimeException(masterCopy.size() + " pages have not been released", t);
}
}
}
}