Disabled query size estimation in percolator, because it is too expensive CPU-wise.

Lucene's RamUsageEstimator.sizeOf(Object) is too expensive.
Query size estimation will be re-enabled when a cheaper way of estimating query size can be found.

Closes #5372
Relates to #5339
This commit is contained in:
Martijn van Groningen 2014-03-09 22:26:59 +07:00
parent a895349be5
commit 6f8f773f8c
4 changed files with 22 additions and 21 deletions

View File

@ -988,7 +988,6 @@
<exclude>org/elasticsearch/bootstrap/Bootstrap.class</exclude> <exclude>org/elasticsearch/bootstrap/Bootstrap.class</exclude>
<exclude>org/elasticsearch/Version.class</exclude> <exclude>org/elasticsearch/Version.class</exclude>
<exclude>org/apache/lucene/queries/XTermsFilter.class</exclude> <exclude>org/apache/lucene/queries/XTermsFilter.class</exclude>
<exclude>org/elasticsearch/index/percolator/stats/ShardPercolateService$RamEstimator.class</exclude>
<exclude>org/elasticsearch/index/merge/Merges.class</exclude> <exclude>org/elasticsearch/index/merge/Merges.class</exclude>
<!-- end excludes for valid system-out --> <!-- end excludes for valid system-out -->
<!-- start excludes for Unsafe --> <!-- start excludes for Unsafe -->

View File

@ -18,6 +18,7 @@
*/ */
package org.elasticsearch.index.percolator.stats; package org.elasticsearch.index.percolator.stats;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Streamable;
@ -37,7 +38,7 @@ public class PercolateStats implements Streamable, ToXContent {
private long percolateCount; private long percolateCount;
private long percolateTimeInMillis; private long percolateTimeInMillis;
private long current; private long current;
private long memorySizeInBytes; private long memorySizeInBytes = -1;
private long numQueries; private long numQueries;
/** /**
@ -90,7 +91,9 @@ public class PercolateStats implements Streamable, ToXContent {
} }
/** /**
* @return The total size the loaded queries take in memory. * @return Temporarily returns <code>-1</code>, but this used to return the total size the loaded queries take in
* memory, but this is disabled now because the size estimation was too expensive cpu wise. This will be enabled
* again when a cheaper size estimation can be found.
*/ */
public long getMemorySizeInBytes() { public long getMemorySizeInBytes() {
return memorySizeInBytes; return memorySizeInBytes;
@ -124,7 +127,6 @@ public class PercolateStats implements Streamable, ToXContent {
percolateCount += percolate.getCount(); percolateCount += percolate.getCount();
percolateTimeInMillis += percolate.getTimeInMillis(); percolateTimeInMillis += percolate.getTimeInMillis();
current += percolate.getCurrent(); current += percolate.getCurrent();
memorySizeInBytes += percolate.getMemorySizeInBytes();
numQueries += percolate.getNumQueries(); numQueries += percolate.getNumQueries();
} }
@ -150,7 +152,11 @@ public class PercolateStats implements Streamable, ToXContent {
percolateCount = in.readVLong(); percolateCount = in.readVLong();
percolateTimeInMillis = in.readVLong(); percolateTimeInMillis = in.readVLong();
current = in.readVLong(); current = in.readVLong();
memorySizeInBytes = in.readVLong(); if (in.getVersion().before(Version.V_1_1_0)) {
in.readVLong();
} else {
in.readLong();
}
numQueries = in.readVLong(); numQueries = in.readVLong();
} }
@ -159,7 +165,11 @@ public class PercolateStats implements Streamable, ToXContent {
out.writeVLong(percolateCount); out.writeVLong(percolateCount);
out.writeVLong(percolateTimeInMillis); out.writeVLong(percolateTimeInMillis);
out.writeVLong(current); out.writeVLong(current);
out.writeVLong(memorySizeInBytes); if (out.getVersion().before(Version.V_1_1_0)) {
out.writeVLong(0);
} else {
out.writeLong(-1);
}
out.writeVLong(numQueries); out.writeVLong(numQueries);
} }
} }

View File

@ -20,7 +20,6 @@
package org.elasticsearch.index.percolator.stats; package org.elasticsearch.index.percolator.stats;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.HashedBytesRef;
import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.CounterMetric;
@ -38,7 +37,6 @@ import java.util.concurrent.TimeUnit;
* <li> total time spent in percolate api * <li> total time spent in percolate api
* <li> the current number of percolate requests * <li> the current number of percolate requests
* <li> number of registered percolate queries * <li> number of registered percolate queries
* <li> the estimated amount of memory the registered queries take
* </ul> * </ul>
*/ */
public class ShardPercolateService extends AbstractIndexShardComponent { public class ShardPercolateService extends AbstractIndexShardComponent {
@ -52,7 +50,6 @@ public class ShardPercolateService extends AbstractIndexShardComponent {
private final CounterMetric currentMetric = new CounterMetric(); private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric(); private final CounterMetric numberOfQueries = new CounterMetric();
private final CounterMetric memorySizeInBytes = new CounterMetric();
public void prePercolate() { public void prePercolate() {
currentMetric.inc(); currentMetric.inc();
@ -64,27 +61,22 @@ public class ShardPercolateService extends AbstractIndexShardComponent {
} }
public void addedQuery(HashedBytesRef id, Query previousQuery, Query newQuery) { public void addedQuery(HashedBytesRef id, Query previousQuery, Query newQuery) {
if (previousQuery != null) { numberOfQueries.inc();
memorySizeInBytes.dec(computeSizeInMemory(id, previousQuery));
} else {
numberOfQueries.inc();
}
memorySizeInBytes.inc(computeSizeInMemory(id, newQuery));
} }
public void removedQuery(HashedBytesRef id, Query query) { public void removedQuery(HashedBytesRef id, Query query) {
numberOfQueries.dec(); numberOfQueries.dec();
memorySizeInBytes.dec(computeSizeInMemory(id, query));
} }
/** /**
* @return The current metrics * @return The current metrics
*/ */
public PercolateStats stats() { public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), memorySizeInBytes.count(), numberOfQueries.count()); return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
} }
private static long computeSizeInMemory(HashedBytesRef id, Query query) { // Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length; long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query); size += RamEstimator.sizeOf(query);
return size; return size;
@ -96,6 +88,6 @@ public class ShardPercolateService extends AbstractIndexShardComponent {
static long sizeOf(Query query) { static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query); return RamUsageEstimator.sizeOf(query);
} }
} }*/
} }

View File

@ -558,7 +558,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries)); assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries));
assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l)); assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), greaterThan(0l)); assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
long percolateCount = 0; long percolateCount = 0;
@ -580,7 +580,7 @@ public class PercolatorTests extends ElasticsearchIntegrationTest {
assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries * 2)); assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries * 2));
assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l)); assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies
assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), greaterThan(0l)); assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
percolateCount = 0; percolateCount = 0;
nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();