From 03087cfc47588766dfe6bebe04f12beb1a951d42 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Thu, 14 Jan 2016 14:30:21 -0700
Subject: [PATCH 1/8] Skip capturing least/most FS info for an FS with no total

If an operating system reports -1 for the total bytes of a filesystem
path, we should ignore it when capturing the least and most available
statistics.

Relates to #15919

Squashed commit of the following:

commit 5d2258ffeff8a0d156295dcc754ab9b6cbb4b02e
Author: Lee Hinman
Date:   Thu Jan 14 14:14:27 2016 -0700

    Change test to test positive total with negative 'free' value

commit 927e61d4b39692fc147220a955b63b291ad80db5
Author: Lee Hinman
Date:   Thu Jan 14 13:09:28 2016 -0700

    Skip capturing least/most FS info for an FS with no total

    If an operating system reports -1 for the total bytes of a filesystem
    path, we should ignore it when capturing the least and most available
    statistics.

    Relates to #15919
---
 .../cluster/InternalClusterInfoService.java   | 22 ++++++++--
 .../elasticsearch/cluster/DiskUsageTests.java | 43 +++++++++++++++++++
 2 files changed, 62 insertions(+), 3 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 925a5a12ed6..5107b4495ab 100644
--- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -406,10 +406,26 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
     String nodeId = nodeStats.getNode().id();
     String nodeName = nodeStats.getNode().getName();
     if (logger.isTraceEnabled()) {
-        logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
+        logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",
+            nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
+            leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
+    }
+    if (leastAvailablePath.getTotal().bytes() < 0) {
+        if (logger.isTraceEnabled()) {
+            logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
+                nodeId, leastAvailablePath.getTotal().bytes());
+        }
+    } else {
+        newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
+    }
+    if (mostAvailablePath.getTotal().bytes() < 0) {
+        if (logger.isTraceEnabled()) {
+            logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
+                nodeId, mostAvailablePath.getTotal().bytes());
+        }
+    } else {
+        newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
     }
-    newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
-    newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
 }
}
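The change above boils down to a guard clause: a negative total means the OS could not report usable filesystem stats, so no usage entry is recorded for that path at all. A standalone sketch of the same pattern, assuming simplified types (PathStats is a hypothetical stand-in for FsInfo.Path, not the real class):

    import java.util.HashMap;
    import java.util.Map;

    class LeastMostUsage {
        static class PathStats {
            final String path;
            final long totalBytes;
            final long availableBytes;

            PathStats(String path, long totalBytes, long availableBytes) {
                this.path = path;
                this.totalBytes = totalBytes;
                this.availableBytes = availableBytes;
            }
        }

        // Record usage only when the OS reported a real total; a negative
        // total (e.g. -1) means the filesystem stats are unusable.
        static void record(Map<String, PathStats> usages, String nodeId, PathStats stats) {
            if (stats.totalBytes < 0) {
                return; // skip: the reported total is meaningless
            }
            usages.put(nodeId, stats);
        }

        public static void main(String[] args) {
            Map<String, PathStats> usages = new HashMap<>();
            record(usages, "node_1", new PathStats("/least", -1, -1));  // skipped
            record(usages, "node_1", new PathStats("/most", 300, 280)); // recorded
            System.out.println(usages.keySet()); // [node_1]
        }
    }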
diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
index 98eea13e673..f581e4c91f6 100644
--- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
@@ -164,7 +164,50 @@ public class DiskUsageTests extends ESTestCase {
         assertDiskUsage(mostNode_3, node3FSInfo[1]);
     }

+    public void testFillDiskUsageSomeInvalidValues() {
+        ImmutableOpenMap.Builder newLeastAvailableUsages = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder newMostAvailableUsages = ImmutableOpenMap.builder();
+        FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
+            new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
+            new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1),
+            new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280),
+        };
+        FsInfo.Path[] node2FSInfo = new FsInfo.Path[] {
+            new FsInfo.Path("/least_most", "/dev/sda", -2, -1, -1),
+        };
+
+        FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
+            new FsInfo.Path("/most", "/dev/sda", 100, 90, 70),
+            new FsInfo.Path("/least", "/dev/sda", 10, -8, 0),
+        };
+        NodeStats[] nodeStats = new NodeStats[] {
+            new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
+                null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null),
+            new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
+                null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null),
+            new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
+                null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null)
+        };
+        InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
+        DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");
+        DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1");
+        assertNull("node1 should have been skipped", leastNode_1);
+        assertDiskUsage(mostNode_1, node1FSInfo[2]);
+
+        DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2");
+        DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2");
+        assertNull("node2 should have been skipped", leastNode_2);
+        assertNull("node2 should have been skipped", mostNode_2);
+
+        DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3");
+        DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3");
+        assertDiskUsage(leastNode_3, node3FSInfo[1]);
+        assertDiskUsage(mostNode_3, node3FSInfo[0]);
+    }
+
     private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) {
+        assertNotNull(usage);
+        assertNotNull(path);
         assertEquals(usage.toString(), usage.getPath(), path.getPath());
         assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().bytes());
         assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().bytes());

From 871b38afcb373d128b8061ffe13ae740304da282 Mon Sep 17 00:00:00 2001
From: Daniel Mitterdorfer
Date: Thu, 14 Jan 2016 15:56:57 +0100
Subject: [PATCH 2/8] Check cluster health in integration test wait condition

With this commit we not only check that an endpoint is up but also that
the cluster status is green. Previously, builds sporadically failed to
pass this condition.
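Outside of the Gradle build, the same readiness check is easy to reproduce: poll the _cluster/health endpoint and let wait_for_nodes block until the expected nodes have joined. A minimal sketch in plain Java (the host, port, node count, and retry policy are illustrative assumptions, not what the build uses):

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WaitForCluster {
        // Retry the health endpoint until it answers, mirroring the retrying
        // ant.get call in the wait condition. wait_for_nodes makes the health
        // API hold the request until that many nodes have joined (or it times out).
        public static boolean waitForNodes(String httpUri, int numNodes, int retries)
                throws IOException, InterruptedException {
            URL url = new URL("http://" + httpUri + "/_cluster/health?wait_for_nodes=" + numNodes);
            for (int i = 0; i < retries; i++) {
                HttpURLConnection conn = (HttpURLConnection) url.openConnection();
                try {
                    if (conn.getResponseCode() == 200) {
                        return true;
                    }
                } catch (IOException e) {
                    // node not up yet; fall through and retry
                } finally {
                    conn.disconnect();
                }
                Thread.sleep(500);
            }
            return false;
        }
    }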
---
 .../org/elasticsearch/gradle/test/ClusterConfiguration.groovy | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index 2741019b751..c9db5657ba4 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -57,12 +57,10 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.echo(message: "[${LocalDateTime.now()}] Waiting for elasticsearch node ${node.httpUri()}", level: "info")
-        ant.get(src: "http://${node.httpUri()}",
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)
-        ant.echo(message: "[${LocalDateTime.now()}] Finished waiting for elasticsearch node ${node.httpUri()}. Reachable? ${tmpFile.exists()}", level: "info")
         return tmpFile.exists()
     }

From a05ea535ad68de3ffa876c2c7c78d9d203d499be Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Tue, 12 Jan 2016 21:12:15 +0100
Subject: [PATCH 3/8] percolator: Make sure that start time is serialized on
 the mpercolate shard requests

Closes #15908
---
 .../percolate/PercolateShardRequest.java      |  4 ---
 .../TransportShardMultiPercolateAction.java   | 14 ++-------
 .../percolator/MultiPercolatorIT.java         | 29 +++++++++++++++++++
 .../PercolateDocumentParserTests.java         | 27 +++++++----------
 4 files changed, 43 insertions(+), 31 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java
index 1e880505b97..74d531df0c4 100644
--- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardRequest.java
@@ -52,10 +52,6 @@ public class PercolateShardRequest extends BroadcastShardRequest {
         this.startTime = request.startTime;
     }

-    public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
-        super(shardId, originalIndices);
-    }
-
     PercolateShardRequest(ShardId shardId, PercolateRequest request) {
         super(shardId, request);
         this.documentType = request.documentType();
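The root cause of the fix below: the transport action deserialized the shard request field by field and never read startTime, while PercolateShardRequest's own readFrom did. Keeping serialization in one place avoids that whole class of bug. An illustrative sketch using plain java.io, not Elasticsearch's real Streamable API:

    import java.io.*;

    // When a wrapper serializes a request field by field, a field added later
    // (like startTime) is silently dropped unless the wrapper is updated too.
    // Delegating to the request's own writeTo/readFrom keeps the wire format
    // defined in exactly one place.
    public class DelegatedSerialization {
        static class ShardRequest {
            String documentType;
            long startTime;

            void writeTo(DataOutputStream out) throws IOException {
                out.writeUTF(documentType);
                out.writeLong(startTime); // wrappers doing their own writes missed this field
            }

            void readFrom(DataInputStream in) throws IOException {
                documentType = in.readUTF();
                startTime = in.readLong();
            }
        }

        public static void main(String[] args) throws IOException {
            ShardRequest request = new ShardRequest();
            request.documentType = "type";
            request.startTime = System.currentTimeMillis();

            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            request.writeTo(new DataOutputStream(bytes));

            ShardRequest copy = new ShardRequest();
            copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(copy.startTime == request.startTime); // true
        }
    }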
diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
index 1d29e6c3971..7140af93ed0 100644
--- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
@@ -160,12 +160,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
         items = new ArrayList<>(size);
         for (int i = 0; i < size; i++) {
             int slot = in.readVInt();
-            OriginalIndices originalIndices = OriginalIndices.readOriginalIndices(in);
-            PercolateShardRequest shardRequest = new PercolateShardRequest(new ShardId(index, shardId), originalIndices);
-            shardRequest.documentType(in.readString());
-            shardRequest.source(in.readBytesReference());
-            shardRequest.docSource(in.readBytesReference());
-            shardRequest.onlyCount(in.readBoolean());
+            PercolateShardRequest shardRequest = new PercolateShardRequest();
+            shardRequest.readFrom(in);
             Item item = new Item(slot, shardRequest);
             items.add(item);
         }
@@ -179,11 +175,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
         out.writeVInt(items.size());
         for (Item item : items) {
             out.writeVInt(item.slot);
-            OriginalIndices.writeOriginalIndices(item.request.originalIndices(), out);
-            out.writeString(item.request.documentType());
-            out.writeBytesReference(item.request.source());
-            out.writeBytesReference(item.request.docSource());
-            out.writeBoolean(item.request.onlyCount());
+            item.request.writeTo(out);
         }
     }

diff --git a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java
index 811f010d099..abd158788f0 100644
--- a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java
+++ b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorIT.java
@@ -33,12 +33,14 @@ import org.elasticsearch.test.ESIntegTestCase;
 import java.io.IOException;

 import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
 import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
@@ -363,6 +365,33 @@ public class MultiPercolatorIT extends ESIntegTestCase {
         assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q");
     }

+    public void testStartTimeIsPropagatedToShardRequests() throws Exception {
+        // See: https://github.com/elastic/elasticsearch/issues/15908
+        internalCluster().ensureAtLeastNumDataNodes(2);
+        client().admin().indices().prepareCreate("test")
+            .setSettings(settingsBuilder()
+                .put("index.number_of_shards", 1)
+                .put("index.number_of_replicas", 1)
+            )
+            .addMapping("type", "date_field", "type=date,format=strict_date_optional_time||epoch_millis")
+            .get();
+        ensureGreen();
+
+        client().prepareIndex("test", ".percolator", "1")
+            .setSource(jsonBuilder().startObject().field("query", rangeQuery("date_field").lt("now+90d")).endObject())
+            .setRefresh(true)
+            .get();
+
+        for (int i = 0; i < 32; i++) {
+            MultiPercolateResponse response = client().prepareMultiPercolate()
+                .add(client().preparePercolate().setDocumentType("type").setIndices("test")
+                    .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("date_field", "2015-07-21T10:28:01-07:00")))
+                .get();
+            assertThat(response.getItems()[0].getResponse().getCount(), equalTo(1L));
+            assertThat(response.getItems()[0].getResponse().getMatches()[0].getId().string(), equalTo("1"));
+        }
+    }
+
     void initNestedIndexAndPercolation() throws IOException {
         XContentBuilder mapping = XContentFactory.jsonBuilder();
         mapping.startObject().startObject("properties").startObject("companyname").field("type", "string").endObject()

diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java
index a8897824738..def34b3818f 100644
--- a/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java
+++ b/core/src/test/java/org/elasticsearch/percolator/PercolateDocumentParserTests.java
@@ -66,14 +66,13 @@ import static org.hamcrest.Matchers.nullValue;

 public class PercolateDocumentParserTests extends ESTestCase {

-    private Index index;
     private MapperService mapperService;
     private PercolateDocumentParser parser;
     private QueryShardContext queryShardContext;
+    private PercolateShardRequest request;

     @Before
     public void init() {
-        index = new Index("_index");
         IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings(
             Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
@@ -97,6 +96,10 @@ public class PercolateDocumentParserTests extends ESTestCase {
         parser = new PercolateDocumentParser(
             highlightPhase, new SortParseElement(), aggregationPhase, mappingUpdatedAction
         );
+
+        request = Mockito.mock(PercolateShardRequest.class);
+        Mockito.when(request.shardId()).thenReturn(new ShardId(new Index("_index"), 0));
+        Mockito.when(request.documentType()).thenReturn("type");
     }

     public void testParseDoc() throws Exception {
             .field("field1", "value1")
             .endObject()
             .endObject();
-        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
-        request.documentType("type");
-        request.source(source.bytes());
+        Mockito.when(request.source()).thenReturn(source.bytes());
         PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
         ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -126,9 +127,7 @@
             .field("size", 123)
             .startObject("sort").startObject("_score").endObject().endObject()
             .endObject();
-        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
-        request.documentType("type");
-        request.source(source.bytes());
+        Mockito.when(request.source()).thenReturn(source.bytes());
         PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
         ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -151,10 +150,8 @@
         XContentBuilder docSource = jsonBuilder().startObject()
             .field("field1", "value1")
             .endObject();
-        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
-        request.documentType("type");
-        request.source(source.bytes());
-        request.docSource(docSource.bytes());
+        Mockito.when(request.source()).thenReturn(source.bytes());
+        Mockito.when(request.docSource()).thenReturn(docSource.bytes());
         PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
         ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -180,10 +177,8 @@
         XContentBuilder docSource = jsonBuilder().startObject()
             .field("field1", "value1")
             .endObject();
-        PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
-        request.documentType("type");
-        request.source(source.bytes());
-        request.docSource(docSource.bytes());
+        Mockito.when(request.source()).thenReturn(source.bytes());
+        Mockito.when(request.docSource()).thenReturn(docSource.bytes());
         PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
         try {

From cc41e6e7fe2647dba91711266971d93735c7c00d Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Thu, 14 Jan 2016 19:01:54 +0100
Subject: [PATCH 4/8] Filter(s) aggregation should create weights only once.

We have a performance bug that if a filter aggregation is below a terms
aggregation that has a cardinality of 1000, we will call Query.createWeight
1000 times as well. However, Query.createWeight can be a costly operation.
For instance in the case of a TermQuery it will seek the term in every
segment. Instead, we should create the Weight once, and then get as many
iterators as we need from this Weight.

I found this problem while trying to diagnose a performance regression
while upgrading from 1.7 to 2.1 [1]. While the problem was not introduced
in 2.x, the fact that 1.7 cached very aggressively had hidden this problem,
since you don't need to seek the term anymore on a cached TermFilter.

Doing things once for every aggregator is not easy with the current API,
but I discussed this with Colin and Aggregator factories will need to get
an init method for different reasons, where we will be able to put these
steps that need to be performed only once, no matter how many aggregators
need to be created.

[1] https://discuss.elastic.co/t/aggregations-in-2-1-0-much-slower-than-1-6-0/38056/26
---
 .../bucket/filter/FilterAggregator.java      | 19 +++++++--
 .../bucket/filters/FiltersAggregator.java    | 42 +++++++++++++------
 .../search/aggregations/bucket/FilterIT.java | 20 +++++++++
 .../search/aggregations/bucket/FiltersIT.java | 22 ++++++++++
 4 files changed, 88 insertions(+), 15 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
index b1308444894..30c34c35938 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
@@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.filter;

 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
@@ -45,13 +46,13 @@ public class FilterAggregator extends SingleBucketAggregator {
     private final Weight filter;

     public FilterAggregator(String name,
-            Query filter,
+            Weight filter,
             AggregatorFactories factories,
             AggregationContext aggregationContext,
             Aggregator parent, List pipelineAggregators,
             Map metaData) throws IOException {
         super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
-        this.filter = aggregationContext.searchContext().searcher().createNormalizedWeight(filter, false);
+        this.filter = filter;
     }

     @Override
@@ -89,10 +90,22 @@ public class FilterAggregator extends SingleBucketAggregator {
         this.filter = filter;
     }

+    // TODO: refactor in order to initialize the factory once with its parent,
+    // the context, etc. and then have a no-arg lightweight create method
+    // (since create may be called thousands of times)
+
+    private IndexSearcher searcher;
+    private Weight weight;
+
     @Override
     public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
             List pipelineAggregators, Map metaData) throws IOException {
-        return new FilterAggregator(name, filter, factories, context, parent, pipelineAggregators, metaData);
+        IndexSearcher contextSearcher = context.searchContext().searcher();
+        if (searcher != contextSearcher) {
+            searcher = contextSearcher;
+            weight = contextSearcher.createNormalizedWeight(filter, false);
+        }
+        return new FilterAggregator(name, weight, factories, context, parent, pipelineAggregators, metaData);
     }
 }
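The factory change above is memoization keyed on the searcher: the first createInternal call for a given IndexSearcher builds the Weight, and every later aggregator created for the same search reuses it. Reduced to a standalone sketch against the Lucene 5.x API the patch uses (the class and field names here are illustrative, not the Elasticsearch ones):

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Weight;

    import java.io.IOException;

    // Creates the (potentially expensive) Weight once per searcher instead of
    // once per aggregator. createNormalizedWeight seeks terms in every segment,
    // so calling it per bucket of a high-cardinality parent is costly.
    class CachedWeightFactory {
        private final Query query;
        private IndexSearcher searcher;
        private Weight weight;

        CachedWeightFactory(Query query) {
            this.query = query;
        }

        Weight weight(IndexSearcher contextSearcher) throws IOException {
            if (searcher != contextSearcher) {
                searcher = contextSearcher;
                weight = contextSearcher.createNormalizedWeight(query, false);
            }
            return weight; // reused for every aggregator of the same search
        }
    }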
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
index eec7064d1bd..c16089e4765 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.filters;

 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
@@ -57,31 +58,26 @@ public class FiltersAggregator extends BucketsAggregator {
     }

     private final String[] keys;
-    private final Weight[] filters;
+    private Weight[] filters;
     private final boolean keyed;
     private final boolean showOtherBucket;
     private final String otherBucketKey;
     private final int totalNumKeys;

-    public FiltersAggregator(String name, AggregatorFactories factories, List filters, boolean keyed, String otherBucketKey,
+    public FiltersAggregator(String name, AggregatorFactories factories, String[] keys, Weight[] filters, boolean keyed, String otherBucketKey,
             AggregationContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData)
             throws IOException {
         super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
         this.keyed = keyed;
-        this.keys = new String[filters.size()];
-        this.filters = new Weight[filters.size()];
+        this.keys = keys;
+        this.filters = filters;
         this.showOtherBucket = otherBucketKey != null;
         this.otherBucketKey = otherBucketKey;
         if (showOtherBucket) {
-            this.totalNumKeys = filters.size() + 1;
+            this.totalNumKeys = keys.length + 1;
         } else {
-            this.totalNumKeys = filters.size();
-        }
-        for (int i = 0; i < filters.size(); ++i) {
-            KeyedFilter keyedFilter = filters.get(i);
-            this.keys[i] = keyedFilter.key;
-            this.filters[i] = aggregationContext.searchContext().searcher().createNormalizedWeight(keyedFilter.filter, false);
+            this.totalNumKeys = keys.length;
         }
     }
@@ -146,6 +142,7 @@ public class FiltersAggregator extends BucketsAggregator {
     public static class Factory extends AggregatorFactory {

         private final List filters;
+        private final String[] keys;
         private boolean keyed;
         private String otherBucketKey;

         this.filters = filters;
         this.keyed = keyed;
         this.otherBucketKey = otherBucketKey;
+        this.keys = new String[filters.size()];
+        for (int i = 0; i < filters.size(); ++i) {
+            KeyedFilter keyedFilter = filters.get(i);
+            this.keys[i] = keyedFilter.key;
+        }
     }

+    // TODO: refactor in order to initialize the factory once with its parent,
+    // the context, etc. and then have a no-arg lightweight create method
+    // (since create may be called thousands of times)
+
+    private IndexSearcher searcher;
+    private Weight[] weights;
+
     @Override
     public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
             List pipelineAggregators, Map metaData) throws IOException {
-        return new FiltersAggregator(name, factories, filters, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
+        IndexSearcher contextSearcher = context.searchContext().searcher();
+        if (searcher != contextSearcher) {
+            searcher = contextSearcher;
+            weights = new Weight[filters.size()];
+            for (int i = 0; i < filters.size(); ++i) {
+                KeyedFilter keyedFilter = filters.get(i);
+                this.weights[i] = contextSearcher.createNormalizedWeight(keyedFilter.filter, false);
+            }
+        }
+        return new FiltersAggregator(name, factories, keys, weights, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
     }
 }

diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
index b447580c7e3..6e97a33e933 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java
@@ -42,6 +42,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogra
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.core.IsNull.notNullValue;

 /**
@@ -145,6 +146,25 @@ public class FilterIT extends ESIntegTestCase {
         assertThat((double) filter.getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs));
     }

+    public void testAsSubAggregation() {
+        SearchResponse response = client().prepareSearch("idx")
+            .addAggregation(
+                histogram("histo").field("value").interval(2L).subAggregation(
+                    filter("filter").filter(matchAllQuery()))).get();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));
+
+        for (Histogram.Bucket bucket : histo.getBuckets()) {
+            Filter filter = bucket.getAggregations().get("filter");
+            assertThat(filter, notNullValue());
+            assertEquals(bucket.getDocCount(), filter.getDocCount());
+        }
+    }
+
     public void testWithContextBasedSubAggregation() throws Exception {
         try {
             client().prepareSearch("idx")

diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
index 42e19674095..2235b00c2c7 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java
@@ -44,6 +44,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.filters;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.core.IsNull.notNullValue;

@@ -205,6 +206,27 @@ public class FiltersIT extends ESIntegTestCase {
         assertThat((double) propertiesCounts[1], equalTo((double) sum / numTag2Docs));
     }

+    public void testAsSubAggregation() {
+        SearchResponse response = client().prepareSearch("idx")
+            .addAggregation(
+                histogram("histo").field("value").interval(2L).subAggregation(
+                    filters("filters").filter(matchAllQuery()))).get();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getBuckets().size(), greaterThanOrEqualTo(1));
+
+        for (Histogram.Bucket bucket : histo.getBuckets()) {
+            Filters filters = bucket.getAggregations().get("filters");
+            assertThat(filters, notNullValue());
+            assertThat(filters.getBuckets().size(), equalTo(1));
+            Filters.Bucket filterBucket = filters.getBuckets().get(0);
+            assertEquals(bucket.getDocCount(), filterBucket.getDocCount());
+        }
+    }
+
     public void testWithContextBasedSubAggregation() throws Exception {
         try {

From ba8ad9c2b727f9609e16aac6ed2df17f44c6b683 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Thu, 14 Jan 2016 11:53:50 -0500
Subject: [PATCH 5/8] Fix calculation of age of pending tasks

This commit addresses a time unit conversion bug in calculating the age
of a PrioritizedRunnable. The issue was an incorrect conversion from
nanoseconds to milliseconds; the conversion was instead to microseconds.
This caused the timeInQueue metric for pending tasks to be off by three
orders of magnitude.
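The bug class is worth a standalone illustration: dividing nanoseconds by 1,000 yields microseconds, not milliseconds, which is exactly a three-orders-of-magnitude error. TimeUnit makes the conversion explicit. A self-contained sketch, not the patched Elasticsearch code:

    import java.util.concurrent.TimeUnit;

    public class AgeOfTask {
        public static void main(String[] args) throws InterruptedException {
            long creationTimeInNanos = System.nanoTime();
            Thread.sleep(250);
            long elapsedNanos = System.nanoTime() - creationTimeInNanos;

            // Buggy: dividing by 1000 converts nanoseconds to MICROseconds,
            // inflating a supposed milliseconds value a thousandfold.
            long wrongMillis = elapsedNanos / 1000;

            // Correct: let TimeUnit do the conversion.
            long millis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);

            System.out.println(wrongMillis + " vs " + millis); // ~250000 vs ~250
        }
    }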
---
 .../util/concurrent/PrioritizedRunnable.java  | 14 +++++-
 .../concurrent/PrioritizedRunnableTests.java  | 43 +++++++++++++++++++
 2 files changed, 55 insertions(+), 2 deletions(-)
 create mode 100644 core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnableTests.java

diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java
index 50d6df9a6a7..374759f7889 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedRunnable.java
@@ -20,6 +20,9 @@ package org.elasticsearch.common.util.concurrent;

 import org.elasticsearch.common.Priority;

+import java.util.concurrent.TimeUnit;
+import java.util.function.LongSupplier;
+
 /**
  *
  */
@@ -27,14 +30,21 @@ public abstract class PrioritizedRunnable implements Runnable, Comparable

Date: Mon, 11 Jan 2016 10:06:46 -0500
Subject: [PATCH 6/8] Fix blended terms for non-strings take 2

It had some funky errors, like lenient:true not working and queries with
two integer fields blowing up if there was no analyzer defined on the
query. This throws a bunch more tests at it and rejiggers how non-strings
are handled so they don't wander off into scary QueryBuilder-land unless
they have a nice strong analyzer to protect them.
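The heart of the rework below is a lenient term-query helper: ask the field type for a term query and, when lenient, drop terms the field cannot parse instead of failing the whole query. Its shape, as a standalone sketch (FieldType here is a stand-in for the real MappedFieldType):

    // Minimal sketch of the lenient term-query pattern used in the patch.
    interface FieldType {
        Object termQuery(Object value); // may throw, e.g. NumberFormatException for "foo" on an int field
    }

    class LenientTerms {
        static Object termQuery(FieldType fieldType, Object value, boolean lenient) {
            try {
                return fieldType.termQuery(value);
            } catch (RuntimeException e) {
                if (lenient) {
                    return null; // drop the unparsable term instead of failing the query
                }
                throw e;
            }
        }
    }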
---
 .../index/mapper/MappedFieldType.java         |  7 +-
 .../index/search/MatchQuery.java              | 85 ++++++++++++-----
 .../index/search/MultiMatchQuery.java         | 55 +++++++----
 .../index/query/MatchQueryBuilderTests.java   | 47 +++++++++-
 .../query/MultiMatchQueryBuilderTests.java    |  4 +-
 .../search/query/MultiMatchQueryIT.java       | 94 +++++++++++++++++++
 6 files changed, 244 insertions(+), 48 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
index 5f8049b55fb..09d459fc4a2 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
@@ -389,7 +389,12 @@ public abstract class MappedFieldType extends FieldType {
         return false;
     }

-    /** Creates a term associated with the field of this mapper for the given value */
+    /**
+     * Creates a term associated with the field of this mapper for the given
+     * value. It's important to use termQuery when building term queries because
+     * things like ParentFieldMapper override it to make more interesting
+     * queries.
+     */
     protected Term createTerm(Object value) {
         return new Term(name(), indexedValueForSearch(value));
     }

diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
index 1b213645ae5..979bfba605f 100644
--- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -212,10 +212,6 @@ public class MatchQuery {
         this.zeroTermsQuery = zeroTermsQuery;
     }

-    protected boolean forceAnalyzeQueryString() {
-        return false;
-    }
-
     protected Analyzer getAnalyzer(MappedFieldType fieldType) {
         if (this.analyzer == null) {
             if (fieldType != null) {
@@ -240,17 +236,19 @@ public class MatchQuery {
             field = fieldName;
         }

-        if (fieldType != null && fieldType.useTermQueryWithQueryString() && !forceAnalyzeQueryString()) {
-            try {
-                return fieldType.termQuery(value, context);
-            } catch (RuntimeException e) {
-                if (lenient) {
-                    return null;
-                }
-                throw e;
-            }
-
+        /*
+         * If the user forced an analyzer we really don't care if they are
+         * searching a type that wants term queries to be used with query string
+         * because the QueryBuilder will take care of it. If they haven't forced
+         * an analyzer then types like NumberFieldType that want terms with
+         * query string will blow up because their analyzer isn't capable of
+         * passing through QueryBuilder.
+         */
+        boolean noForcedAnalyzer = this.analyzer == null;
+        if (fieldType != null && fieldType.useTermQueryWithQueryString() && noForcedAnalyzer) {
+            return termQuery(fieldType, value);
         }
+
         Analyzer analyzer = getAnalyzer(fieldType);
         assert analyzer != null;
         MatchQueryBuilder builder = new MatchQueryBuilder(analyzer, fieldType);
@@ -282,6 +280,26 @@ public class MatchQuery {
         }
     }

+    /**
+     * Creates a TermQuery-like-query for MappedFieldTypes that don't support
+     * QueryBuilder which is very string-ish. Just delegates to the
+     * MappedFieldType for MatchQuery but gets more complex for blended queries.
+     */
+    protected Query termQuery(MappedFieldType fieldType, Object value) {
+        return termQuery(fieldType, value, lenient);
+    }
+
+    protected final Query termQuery(MappedFieldType fieldType, Object value, boolean lenient) {
+        try {
+            return fieldType.termQuery(value, context);
+        } catch (RuntimeException e) {
+            if (lenient) {
+                return null;
+            }
+            throw e;
+        }
+    }
+
     protected Query zeroTermsQuery() {
         return zeroTermsQuery == DEFAULT_ZERO_TERMS_QUERY ? Queries.newMatchNoDocsQuery() : Queries.newMatchAllQuery();
     }
@@ -289,20 +307,20 @@
     private class MatchQueryBuilder extends QueryBuilder {

         private final MappedFieldType mapper;
+
         /**
          * Creates a new QueryBuilder using the given analyzer.
          */
         public MatchQueryBuilder(Analyzer analyzer, @Nullable MappedFieldType mapper) {
             super(analyzer);
             this.mapper = mapper;
-        }
+        }

         @Override
         protected Query newTermQuery(Term term) {
             return blendTermQuery(term, mapper);
         }
-
         public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
             final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
             final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();
@@ -352,11 +370,16 @@
     protected Query blendTermQuery(Term term, MappedFieldType fieldType) {
         if (fuzziness != null) {
             if (fieldType != null) {
-                Query query = fieldType.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
-                if (query instanceof FuzzyQuery) {
-                    QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
+                try {
+                    Query query = fieldType.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
+                    if (query instanceof FuzzyQuery) {
+                        QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
+                    }
+                    return query;
+                } catch (RuntimeException e) {
+                    return new TermQuery(term);
+                    // See long comment below about why we're lenient here.
                 }
-                return query;
             }
             int edits = fuzziness.asDistance(term.text());
             FuzzyQuery query = new FuzzyQuery(term, edits, fuzzyPrefixLength, maxExpansions, transpositions);
@@ -364,9 +387,25 @@
             return query;
         }
         if (fieldType != null) {
-            Query termQuery = fieldType.queryStringTermQuery(term);
-            if (termQuery != null) {
-                return termQuery;
+            /*
+             * It's a bit weird to default to lenient here but it's backwards
+             * compatible. It makes some sense when you think about what we are
+             * doing here: at this point the user has forced an analyzer and
+             * passed some string to the match query. We cut it up using the
+             * analyzer and then tried to cram whatever we get into the field.
+             * lenient=true here means that we try the terms in the query and on
+             * the off chance that they are actually valid terms then we
+             * actually try them. lenient=false would mean that we blow up the
+             * query if they aren't valid terms. "valid" in this context means
+             * "parses properly to something of the type being queried." So "1"
+             * is a valid number, etc.
+             *
+             * We use the text form here because we've received the term from
+             * an analyzer that cut some string into text.
+             */
+            Query query = termQuery(fieldType, term.bytes(), true);
+            if (query != null) {
+                return query;
             }
         }
         return new TermQuery(term);

diff --git a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
index cf30c3dbe47..0421f283600 100644
--- a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
+++ b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java
@@ -27,7 +27,6 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -104,7 +103,7 @@ public class MultiMatchQuery extends MatchQuery {
         this.tieBreaker = tieBreaker;
     }

-    public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException{
+    public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException{
         List queries = new ArrayList<>();
         for (String fieldName : fieldNames.keySet()) {
             Float boostValue = fieldNames.get(fieldName);
@@ -146,8 +145,8 @@ public class MultiMatchQuery extends MatchQuery {
             return MultiMatchQuery.super.blendTermQuery(term, fieldType);
         }

-        public boolean forceAnalyzeQueryString() {
-            return false;
+        public Query termQuery(MappedFieldType fieldType, Object value) {
+            return MultiMatchQuery.this.termQuery(fieldType, value, lenient);
         }
     }

@@ -196,8 +195,13 @@
             } else {
                 blendedFields = null;
             }
-            final FieldAndFieldType fieldAndFieldType = group.get(0);
-            Query q = parseGroup(type.matchQueryType(), fieldAndFieldType.field, 1f, value, minimumShouldMatch);
+            /*
+             * We have to pick some field to pass through the superclass so
+             * we just pick the first field. It shouldn't matter because
+             * fields are already grouped by their analyzers/types.
+             */
+            String representativeField = group.get(0).field;
+            Query q = parseGroup(type.matchQueryType(), representativeField, 1f, value, minimumShouldMatch);
             if (q != null) {
                 queries.add(q);
             }

         return queries.isEmpty() ? null : queries;
     }

-        @Override
-        public boolean forceAnalyzeQueryString() {
-            return blendedFields != null;
-        }
-
         @Override
         public Query blendTerm(Term term, MappedFieldType fieldType) {
             if (blendedFields == null) {

             }
             return BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker);
         }
+
+        @Override
+        public Query termQuery(MappedFieldType fieldType, Object value) {
+            /*
+             * Use the string value of the term because we're reusing the
+             * portion of the query that usually runs after the analyzer has
+             * run on each term. We just skip that analyzer phase.
+             */
+            return blendTerm(new Term(fieldType.name(), value.toString()), fieldType);
+        }
     }

     @Override
@@ -241,6 +250,15 @@
         return queryBuilder.blendTerm(term, fieldType);
     }

+    @Override
+    protected Query termQuery(MappedFieldType fieldType, Object value) {
+        if (queryBuilder == null) {
+            // Can be null when the MultiMatchQuery collapses into a MatchQuery
+            return super.termQuery(fieldType, value);
+        }
+        return queryBuilder.termQuery(fieldType, value);
+    }
+
     private static final class FieldAndFieldType {
         final String field;
         final MappedFieldType fieldType;
@@ -255,18 +273,17 @@
         public Term newTerm(String value) {
             try {
-                final BytesRef bytesRef = fieldType.indexedValueForSearch(value);
-                return new Term(field, bytesRef);
-            } catch (Exception ex) {
+                /*
+                 * Note that this ignores any overrides the fieldType might do
+                 * for termQuery, meaning things like _parent won't work here.
+                 */
+                return new Term(fieldType.name(), fieldType.indexedValueForSearch(value));
+            } catch (RuntimeException ex) {
                 // we can't parse it just use the incoming value -- it will
                 // just have a DF of 0 at the end of the day and will be ignored
+                // Note that this is like lenient = true always
             }
             return new Term(field, value);
         }
     }
-
-    @Override
-    protected boolean forceAnalyzeQueryString() {
-        return this.queryBuilder == null ? super.forceAnalyzeQueryString() : this.queryBuilder.forceAnalyzeQueryString();
-    }
 }

diff --git a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
index e7b2923c37e..04f4ea04376 100644
--- a/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java
@@ -24,14 +24,18 @@
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.search.MatchQuery;
 import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery;
+import org.hamcrest.Matcher;
+import org.joda.time.format.ISODateTimeFormat;

 import java.io.IOException;
 import java.util.Locale;
@@ -120,15 +124,15 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase

            termLcMatcher = equalTo(originalTermLc);
+           if ("false".equals(originalTermLc) || "true".equals(originalTermLc)) {
+               // Booleans become t/f when querying a boolean field
+               termLcMatcher = either(termLcMatcher).or(equalTo(originalTermLc.substring(0, 1)));
+           }
+           assertThat(actualTermLc, termLcMatcher);
            assertThat(queryBuilder.prefixLength(), equalTo(fuzzyQuery.getPrefixLength()));
            assertThat(queryBuilder.fuzzyTranspositions(), equalTo(fuzzyQuery.getTranspositions()));
        }
+
+       if (query instanceof NumericRangeQuery) {
+           // These are fuzzy numeric queries
+           assertTrue(queryBuilder.fuzziness() != null);
+           @SuppressWarnings("unchecked")
+           NumericRangeQuery numericRangeQuery = (NumericRangeQuery) query;
+           assertTrue(numericRangeQuery.includesMin());
+           assertTrue(numericRangeQuery.includesMax());
+
+           double value;
+           try {
+               value = Double.parseDouble(queryBuilder.value().toString());
+           } catch (NumberFormatException e) {
+               // Maybe it's a date
+               value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
+           }
+           double width;
+           if (queryBuilder.fuzziness().equals(Fuzziness.AUTO)) {
+               width = 1;
+           } else {
+               try {
+                   width = queryBuilder.fuzziness().asDouble();
+               } catch (NumberFormatException e) {
+                   // Maybe a time value?
+                   width = queryBuilder.fuzziness().asTimeValue().getMillis();
+               }
+           }
+           assertEquals(value - width, numericRangeQuery.getMin().doubleValue(), width * .1);
+           assertEquals(value + width, numericRangeQuery.getMax().doubleValue(), width * .1);
+       }
    }

    public void testIllegalValues() {

diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
index 36c4f328453..a4af84a8f79 100644
--- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
@@ -27,6 +27,7 @@
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -132,7 +133,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase

Date: Fri, 15 Jan 2016 18:31:19 +0100
Subject: [PATCH 7/8] check that busy waiting does not time out and fix
 replica counter test

---
 .../replication/TransportReplicationActionTests.java | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index 0a390ea3706..cf49b4292b8 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -583,7 +583,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertIndexShardCounter(1);
     }

-    public void testCounterOnPrimary() throws InterruptedException, ExecutionException, IOException {
+    public void testCounterOnPrimary() throws Exception {
         final String index = "test";
         final ShardId shardId = new ShardId(index, 0);
         // no replica, we only want to test on primary
@@ -611,9 +611,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         t.start();
         // shard operation should be ongoing, so the counter is at 2
         // we have to wait here because increment happens in thread
-        awaitBusy(() -> count.get() == 2);
-
-        assertIndexShardCounter(2);
+        assertBusy(() -> assertIndexShardCounter(2));
         assertThat(transport.capturedRequests().length, equalTo(0));
         ((ActionWithDelay) action).countDownLatch.countDown();
         t.join();
@@ -664,7 +662,7 @@ public class TransportReplicationActionTests extends ESTestCase {
             @Override
             public void run() {
                 try {
-                    replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel(new PlainActionFuture<>()));
+                    replicaOperationTransportHandler.messageReceived(new Request().setShardId(shardId), createTransportChannel(new PlainActionFuture<>()));
                 } catch (Exception e) {
                 }
             }
@@ -672,7 +670,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         t.start();
         // shard operation should be ongoing, so the counter is at 2
         // we have to wait here because increment happens in thread
-        awaitBusy(() -> count.get() == 2);
+        assertBusy(() -> assertIndexShardCounter(2));
         ((ActionWithDelay) action).countDownLatch.countDown();
         t.join();
         // operation should have finished and counter decreased because no outstanding replica requests
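The switch from awaitBusy to assertBusy matters because awaitBusy returns a boolean that the test was ignoring, so a timed-out wait could still let the test pass. Simplified stand-ins for the two ESTestCase helpers show the difference (a sketch, not the real implementations):

    import java.util.function.BooleanSupplier;

    class BusyWait {
        static boolean awaitBusy(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (System.currentTimeMillis() < deadline) {
                if (condition.getAsBoolean()) {
                    return true;
                }
                Thread.sleep(10);
            }
            return false; // easy to ignore silently: the bug this patch fixes
        }

        static void assertBusy(Runnable assertion, long timeoutMillis) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (true) {
                try {
                    assertion.run();
                    return;
                } catch (AssertionError e) {
                    if (System.currentTimeMillis() >= deadline) {
                        throw e; // a timeout now fails the test instead of passing silently
                    }
                    Thread.sleep(10);
                }
            }
        }
    }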
From a7185a1d319488e602be3af8c2e9731e733a0886 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Fri, 15 Jan 2016 12:31:12 -0500
Subject: [PATCH 8/8] Simplify equality test in IndexService#sameException

This commit simplifies an equality test in IndexService#sameException
where the messages for two exceptions are being compared. The previous
condition tested logical equality when the left exception's message is
not null, and otherwise tested reference equality. There is a
convenience method since JDK 7 for testing equality in this way:
Objects#equals.

Closes #16025
---
 core/src/main/java/org/elasticsearch/index/IndexService.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index 6b79846d0f4..05893ae3813 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -25,6 +25,7 @@
 import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
@@ -715,8 +716,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     private static boolean sameException(Exception left, Exception right) {
         if (left.getClass() == right.getClass()) {
-            if ((left.getMessage() != null && left.getMessage().equals(right.getMessage()))
-                || left.getMessage() == right.getMessage()) {
+            if (Objects.equals(left.getMessage(), right.getMessage())) {
                 StackTraceElement[] stackTraceLeft = left.getStackTrace();
                 StackTraceElement[] stackTraceRight = right.getStackTrace();
                 if (stackTraceLeft.length == stackTraceRight.length) {
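For reference, the Objects#equals call adopted above is just the null-safe comparison idiom collapsed into one method; a minimal illustration:

    import java.util.Objects;

    public class NullSafeEquals {
        public static void main(String[] args) {
            String a = null, b = null;
            // The long form the patch removes:
            boolean oldStyle = (a != null && a.equals(b)) || a == b;
            // The JDK 7+ equivalent:
            boolean newStyle = Objects.equals(a, b);
            System.out.println(oldStyle + " " + newStyle); // true true
        }
    }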