Merge pull request #15356 from rjernst/clean_benchmarks

Remove benchmark package
commit 7f4ef9f586
Ryan Ernst, 2015-12-10 19:30:25 -08:00
59 changed files with 0 additions and 8482 deletions

Deleted: org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
@@ -1,139 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.aliases;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.node.Node;
import java.io.IOException;
import java.util.List;
/** Benchmarks the cost of adding and removing index aliases as the total alias count grows. */
public class AliasesBenchmark {
private final static String INDEX_NAME = "my-index";
public static void main(String[] args) throws IOException {
int NUM_ADDITIONAL_NODES = 1;
int BASE_ALIAS_COUNT = 100000;
int NUM_ADD_ALIAS_REQUEST = 1000;
Settings settings = Settings.settingsBuilder()
.put("node.master", false).build();
Node node1 = new Node(
Settings.settingsBuilder().put(settings).put("node.master", true).build()
).start();
Node[] otherNodes = new Node[NUM_ADDITIONAL_NODES];
for (int i = 0; i < otherNodes.length; i++) {
otherNodes[i] = new Node(settings).start();
}
Client client = node1.client();
try {
client.admin().indices().prepareCreate(INDEX_NAME).execute().actionGet();
} catch (IndexAlreadyExistsException e) {}
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
int numberOfAliases = countAliases(client);
System.out.println("Number of aliases: " + numberOfAliases);
if (numberOfAliases < BASE_ALIAS_COUNT) {
int diff = BASE_ALIAS_COUNT - numberOfAliases;
System.out.println("Adding " + diff + " more aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
for (int i = 1; i <= diff; i++) {
builder.addAlias(INDEX_NAME, Strings.randomBase64UUID());
if (i % 1000 == 0) {
builder.execute().actionGet();
builder = client.admin().indices().prepareAliases();
}
}
if (!builder.request().getAliasActions().isEmpty()) {
builder.execute().actionGet();
}
} else if (numberOfAliases > BASE_ALIAS_COUNT) {
IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
int diff = numberOfAliases - BASE_ALIAS_COUNT;
System.out.println("Removing " + diff + " aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
List<AliasMetaData> aliases= client.admin().indices().prepareGetAliases("*")
.addIndices(INDEX_NAME)
.execute().actionGet().getAliases().get(INDEX_NAME);
for (int i = 0; i <= diff; i++) {
builder.removeAlias(INDEX_NAME, aliases.get(i).alias());
if (i % 1000 == 0) {
builder.execute().actionGet();
builder = client.admin().indices().prepareAliases();
}
}
if (!builder.request().getAliasActions().isEmpty()) {
builder.execute().actionGet();
}
}
numberOfAliases = countAliases(client);
System.out.println("Number of aliases: " + numberOfAliases);
long totalTime = 0;
int max = numberOfAliases + NUM_ADD_ALIAS_REQUEST;
for (int i = numberOfAliases; i <= max; i++) {
if (i != numberOfAliases && i % 100 == 0) {
long avgTime = totalTime / 100;
System.out.println("Added [" + (i - numberOfAliases) + "] aliases. Avg create time: " + avgTime + " ms");
System.out.println("Heap used [" + JvmStats.jvmStats().getMem().getHeapUsed() + "]");
totalTime = 0;
}
long time = System.currentTimeMillis();
// String filter = termFilter("field" + i, "value" + i).toXContent(XContentFactory.jsonBuilder(), null).string();
client.admin().indices().prepareAliases().addAlias(INDEX_NAME, Strings.randomBase64UUID()/*, filter*/)
.execute().actionGet();
totalTime += System.currentTimeMillis() - time;
}
System.gc();
System.out.println("Final heap used [" + JvmStats.jvmStats().getMem().getHeapUsed() + "]");
System.out.println("Number of aliases: " + countAliases(client));
client.close();
node1.close();
for (Node otherNode : otherNodes) {
otherNode.close();
}
}
private static int countAliases(Client client) {
GetAliasesResponse response = client.admin().indices().prepareGetAliases("*")
.addIndices(INDEX_NAME)
.execute().actionGet();
if (response.getAliases().isEmpty()) {
return 0;
} else {
return response.getAliases().get(INDEX_NAME).size();
}
}
}

Deleted: org/elasticsearch/benchmark/bloom/BloomBench.java
@@ -1,63 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.bloom;
import org.apache.lucene.codecs.bloom.FuzzySet;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.util.BloomFilter;
import java.security.SecureRandom;
/** Compares false-positive counts of BloomFilter and Lucene's FuzzySet at the same memory budget. */
public class BloomBench {
public static void main(String[] args) throws Exception {
SecureRandom random = new SecureRandom();
final int ELEMENTS = (int) SizeValue.parseSizeValue("1m").singles();
final double fpp = 0.01;
BloomFilter gFilter = BloomFilter.create(ELEMENTS, fpp);
System.out.println("G SIZE: " + new ByteSizeValue(gFilter.getSizeInBytes()));
FuzzySet lFilter = FuzzySet.createSetBasedOnMaxMemory((int) gFilter.getSizeInBytes());
//FuzzySet lFilter = FuzzySet.createSetBasedOnQuality(ELEMENTS, 0.97f);
for (int i = 0; i < ELEMENTS; i++) {
BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
gFilter.put(bytesRef);
lFilter.addValue(bytesRef);
}
int lFalse = 0;
int gFalse = 0;
for (int i = 0; i < ELEMENTS; i++) {
BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
if (gFilter.mightContain(bytesRef)) {
gFalse++;
}
if (lFilter.contains(bytesRef) == FuzzySet.ContainsResult.MAYBE) {
lFalse++;
}
}
System.out.println("Failed positives, g[" + gFalse + "], l[" + lFalse + "]");
}
}
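
A note on reading the output: both filters are probed with fresh random UUIDs that were almost surely never added, so every reported hit is a false positive. With fpp = 0.01 and ELEMENTS = 1m, the expected count for the BloomFilter is roughly 0.01 × 1,000,000 = 10,000; how far lFalse lands from gFalse indicates the quality the FuzzySet achieved within the same memory budget.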

Deleted: org/elasticsearch/benchmark/breaker/CircuitBreakerBenchmark.java
@@ -1,188 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.breaker;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import static junit.framework.Assert.assertNotNull;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
/**
* Benchmarks for different implementations of the circuit breaker
*/
public class CircuitBreakerBenchmark {
private static final String INDEX = UUID.randomUUID().toString();
private static final int QUERIES = 100;
private static final int BULK_SIZE = 100;
private static final int NUM_DOCS = 2_000_000;
private static final int AGG_SIZE = 25;
private static void switchToNoop(Client client) {
Settings settings = settingsBuilder()
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, CircuitBreaker.Type.NOOP)
.build();
client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
}
private static void switchToMemory(Client client) {
Settings settings = settingsBuilder()
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, CircuitBreaker.Type.MEMORY)
.build();
client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
}
private static void runSingleThreadedQueries(Client client) {
long totalTime = 0;
for (int i = 0; i < QUERIES; i++) {
if (i % 10 == 0) {
System.out.println("--> query #" + i);
}
SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
.addAggregation(
terms("myterms")
.size(AGG_SIZE)
.field("num")
).setSize(0).get();
Terms terms = resp.getAggregations().get("myterms");
assertNotNull("term aggs were calculated", terms);
totalTime += resp.getTookInMillis();
}
System.out.println("--> single threaded average time: " + (totalTime / QUERIES) + "ms");
}
private static void runMultiThreadedQueries(final Client client) throws Exception {
final AtomicLong totalThreadedTime = new AtomicLong(0);
int THREADS = 10;
Thread threads[] = new Thread[THREADS];
for (int i = 0; i < THREADS; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
long tid = Thread.currentThread().getId();
for (int i = 0; i < QUERIES; i++) {
if (i % 30 == 0) {
System.out.println("--> [" + tid + "] query # "+ i);
}
SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
.addAggregation(
terms("myterms")
.size(AGG_SIZE)
.field("num")
).setSize(0).get();
Terms terms = resp.getAggregations().get("myterms");
assertNotNull("term aggs were calculated", terms);
totalThreadedTime.addAndGet(resp.getTookInMillis());
}
}
});
}
System.out.println("--> starting " + THREADS + " threads for parallel aggregating");
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
System.out.println("--> threaded average time: " + (totalThreadedTime.get() / (THREADS * QUERIES)) + "ms");
}
public static void main(String args[]) throws Exception {
Node node = new Node(Settings.EMPTY);
final Client client = node.client();
try {
try {
client.admin().indices().prepareDelete(INDEX).get();
} catch (Exception e) {
// Ignore
}
try {
client.admin().indices().prepareCreate(INDEX).setSettings(
settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0)).get();
} catch (IndexAlreadyExistsException e) {}
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
System.out.println("--> indexing: " + NUM_DOCS + " documents...");
BulkRequestBuilder bulkBuilder = client.prepareBulk();
for (int i = 0; i < NUM_DOCS; i++) {
bulkBuilder.add(client.prepareIndex(INDEX, "doc").setSource("num", i));
if (i % BULK_SIZE == 0) {
// Send off bulk request
bulkBuilder.get();
// Create a new holder
bulkBuilder = client.prepareBulk();
}
}
bulkBuilder.get();
client.admin().indices().prepareRefresh(INDEX).get();
SearchResponse countResp = client.prepareSearch(INDEX).setQuery(matchAllQuery()).setSize(0).get();
assert countResp.getHits().getTotalHits() == NUM_DOCS : "all docs should be indexed";
final int warmupCount = 100;
for (int i = 0; i < warmupCount; i++) {
if (i % 15 == 0) {
System.out.println("--> warmup #" + i);
}
SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
.addAggregation(
terms("myterms")
.size(AGG_SIZE)
.field("num")
).setSize(0).get();
Terms terms = resp.getAggregations().get("myterms");
assertNotNull("term aggs were calculated", terms);
}
System.out.println("--> running single-threaded tests");
runSingleThreadedQueries(client);
System.out.println("--> switching to NOOP breaker");
switchToNoop(client);
runSingleThreadedQueries(client);
switchToMemory(client);
System.out.println("--> running multi-threaded tests");
runMultiThreadedQueries(client);
System.out.println("--> switching to NOOP breaker");
switchToNoop(client);
runMultiThreadedQueries(client);
} finally {
client.close();
node.close();
}
}
}
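
The terms-aggregation search above is spelled out three times (warmup, single-threaded run, multi-threaded run). A small helper along these lines (a hypothetical refactor, not in the original file) would keep the three copies from drifting apart:

    private static SearchResponse termsAggSearch(Client client) {
        // the exact query used for warmup and for both measured runs
        return client.prepareSearch(INDEX)
                .setQuery(matchAllQuery())
                .addAggregation(terms("myterms").size(AGG_SIZE).field("num"))
                .setSize(0)
                .get();
    }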

Deleted: org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
@@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.checksum;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import java.security.MessageDigest;
import java.util.zip.Adler32;
import java.util.zip.CRC32;
/** Compares CRC32, Adler32 and MD5 throughput over identical zeroed buffers. */
public class ChecksumBenchmark {
public static final int BATCH_SIZE = 16 * 1024;
public static void main(String[] args) throws Exception {
System.out.println("Warning up");
long warmSize = ByteSizeValue.parseBytesSizeValue("1g", null).bytes();
crc(warmSize);
adler(warmSize);
md5(warmSize);
long dataSize = ByteSizeValue.parseBytesSizeValue("10g", null).bytes();
System.out.println("Running size: " + dataSize);
crc(dataSize);
adler(dataSize);
md5(dataSize);
}
private static void crc(long dataSize) {
long start = System.currentTimeMillis();
CRC32 crc = new CRC32();
byte[] data = new byte[BATCH_SIZE];
long iter = dataSize / BATCH_SIZE;
for (long i = 0; i < iter; i++) {
crc.update(data);
}
crc.getValue();
System.out.println("CRC took " + new TimeValue(System.currentTimeMillis() - start));
}
private static void adler(long dataSize) {
long start = System.currentTimeMillis();
Adler32 crc = new Adler32();
byte[] data = new byte[BATCH_SIZE];
long iter = dataSize / BATCH_SIZE;
for (long i = 0; i < iter; i++) {
crc.update(data);
}
crc.getValue();
System.out.println("Adler took " + new TimeValue(System.currentTimeMillis() - start));
}
private static void md5(long dataSize) throws Exception {
long start = System.currentTimeMillis();
byte[] data = new byte[BATCH_SIZE];
long iter = dataSize / BATCH_SIZE;
MessageDigest digest = MessageDigest.getInstance("MD5");
for (long i = 0; i < iter; i++) {
digest.update(data);
}
digest.digest();
System.out.println("md5 took " + new TimeValue(System.currentTimeMillis() - start));
}
}
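
For scale: the "10g" run feeds each checksum 10 GB / 16 KB = 655,360 update calls over the same zeroed buffer. The benchmark prints wall time only; to report throughput as well, a shared helper (hypothetical, reusing the TimeValue import above) could be called from all three methods:

    private static void report(String name, long dataSize, long millis) {
        // MB/s makes runs over different data sizes comparable
        double mbPerSec = (dataSize / (1024.0 * 1024.0)) / (millis / 1000.0);
        System.out.println(name + " took " + new TimeValue(millis) + " (" + String.format("%.1f", mbPerSec) + " MB/s)");
    }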

Deleted: org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
@@ -1,88 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.cluster;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Random;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
public class ClusterAllocationRerouteBenchmark {
private static final ESLogger logger = Loggers.getLogger(ClusterAllocationRerouteBenchmark.class);
public static void main(String[] args) {
final int numberOfRuns = 1;
final int numIndices = 5 * 365; // five years
final int numShards = 6;
final int numReplicas = 2;
final int numberOfNodes = 30;
final int numberOfTags = 2;
AllocationService strategy = ESAllocationTestCase.createAllocationService(Settings.builder()
.put("cluster.routing.allocation.awareness.attributes", "tag")
.build(), new Random(1));
MetaData.Builder mb = MetaData.builder();
for (int i = 1; i <= numIndices; i++) {
mb.put(IndexMetaData.builder("test_" + i).numberOfShards(numShards).numberOfReplicas(numReplicas));
}
MetaData metaData = mb.build();
RoutingTable.Builder rb = RoutingTable.builder();
for (int i = 1; i <= numIndices; i++) {
rb.addAsNew(metaData.index("test_" + i));
}
RoutingTable routingTable = rb.build();
DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
for (int i = 1; i <= numberOfNodes; i++) {
nb.put(ESAllocationTestCase.newNode("node" + i, singletonMap("tag", "tag_" + (i % numberOfTags))));
}
ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).nodes(nb).build();
long start = System.currentTimeMillis();
for (int i = 0; i < numberOfRuns; i++) {
logger.info("[{}] starting... ", i);
long runStart = System.currentTimeMillis();
ClusterState clusterState = initialClusterState;
while (clusterState.getRoutingNodes().hasUnassignedShards()) {
logger.info("[{}] remaining unassigned {}", i, clusterState.getRoutingNodes().unassigned().size());
RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
result = strategy.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
}
logger.info("[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
}
long took = System.currentTimeMillis() - start;
logger.info("total took {}, AVG {}", TimeValue.timeValueMillis(took), TimeValue.timeValueMillis(took / numberOfRuns));
}
}

Deleted: org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
@@ -1,97 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.common.lucene.uidscan;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
import org.apache.lucene.store.FSDirectory;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.SizeValue;
import java.nio.file.Paths;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
/** Measures concurrent version lookups by _uid term against a Lucene index. */
public class LuceneUidScanBenchmark {
public static void main(String[] args) throws Exception {
FSDirectory dir = FSDirectory.open(PathUtils.get("work/test"));
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
final int NUMBER_OF_THREADS = 2;
final long INDEX_COUNT = SizeValue.parseSizeValue("1m").singles();
final long SCAN_COUNT = SizeValue.parseSizeValue("100k").singles();
final long startUid = 1000000;
long LIMIT = startUid + INDEX_COUNT;
StopWatch watch = new StopWatch().start();
System.out.println("Indexing " + INDEX_COUNT + " docs...");
for (long i = startUid; i < LIMIT; i++) {
Document doc = new Document();
doc.add(new StringField("_uid", Long.toString(i), Store.NO));
doc.add(new NumericDocValuesField("_version", i));
writer.addDocument(doc);
}
System.out.println("Done indexing, took " + watch.stop().lastTaskTime());
final IndexReader reader = DirectoryReader.open(writer, true);
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
try {
for (long i = 0; i < SCAN_COUNT; i++) {
long id = startUid + ThreadLocalRandom.current().nextLong(INDEX_COUNT);
final long version = Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
if (version != id) {
System.err.println("wrong id...");
break;
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
latch.countDown();
}
}
});
}
watch = new StopWatch().start();
for (int i = 0; i < threads.length; i++) {
threads[i].start();
}
latch.await();
watch.stop();
System.out.println("Scanned in " + watch.totalTime() + " TP Seconds " + ((SCAN_COUNT * NUMBER_OF_THREADS) / watch.totalTime().secondsFrac()));
}
}

Deleted: org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
@@ -1,128 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.common.recycler;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.common.recycler.Recyclers.concurrent;
import static org.elasticsearch.common.recycler.Recyclers.concurrentDeque;
import static org.elasticsearch.common.recycler.Recyclers.deque;
import static org.elasticsearch.common.recycler.Recyclers.dequeFactory;
import static org.elasticsearch.common.recycler.Recyclers.locked;
import static org.elasticsearch.common.recycler.Recyclers.none;
/** Benchmark that tries to measure the overhead of object recycling depending on concurrent access. */
public class RecyclerBenchmark {
private static final long NUM_RECYCLES = 5000000L;
private static final Random RANDOM = new Random(0);
private static long bench(final Recycler<?> recycler, long numRecycles, int numThreads) throws InterruptedException {
final AtomicLong recycles = new AtomicLong(numRecycles);
final CountDownLatch latch = new CountDownLatch(1);
final Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; ++i){
// Thread ids happen to be generated sequentially, so we also create a random number of
// throwaway threads so that the distribution of ids is not perfect for the concurrent recycler
for (int j = RANDOM.nextInt(5); j >= 0; --j) {
new Thread();
}
threads[i] = new Thread() {
@Override
public void run() {
try {
latch.await();
} catch (InterruptedException e) {
return;
}
while (recycles.getAndDecrement() > 0) {
final Recycler.V<?> v = recycler.obtain();
v.close();
}
}
};
}
for (Thread thread : threads) {
thread.start();
}
final long start = System.nanoTime();
latch.countDown();
for (Thread thread : threads) {
thread.join();
}
return System.nanoTime() - start;
}
public static void main(String[] args) throws InterruptedException {
final int limit = 100;
final Recycler.C<Object> c = new AbstractRecyclerC<Object>() {
@Override
public Object newInstance(int sizing) {
return new Object();
}
@Override
public void recycle(Object value) {
// do nothing
}
};
Map<String, Recycler<Object>> recyclers = new HashMap<>();
recyclers.put("none", none(c));
recyclers.put("concurrent-queue", concurrentDeque(c, limit));
recyclers.put("locked", locked(deque(c, limit)));
recyclers.put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors()));
// warmup
final long start = System.nanoTime();
while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
for (Recycler<?> recycler : recyclers.values()) {
bench(recycler, NUM_RECYCLES, 2);
}
}
// run
for (int numThreads = 1; numThreads <= 4 * Runtime.getRuntime().availableProcessors(); numThreads *= 2) {
System.out.println("## " + numThreads + " threads\n");
System.gc();
Thread.sleep(1000);
for (Recycler<?> recycler : recyclers.values()) {
bench(recycler, NUM_RECYCLES, numThreads);
}
for (int i = 0; i < 5; ++i) {
for (Map.Entry<String, Recycler<Object>> entry : recyclers.entrySet()) {
System.out.println(entry.getKey() + "\t" + TimeUnit.NANOSECONDS.toMillis(bench(entry.getValue(), NUM_RECYCLES, numThreads)));
}
System.out.println();
}
}
}
}

Deleted: org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
@@ -1,66 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.counter;
import org.elasticsearch.common.StopWatch;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
/** Measures AtomicLong increment throughput, single-threaded and under contention. */
public class SimpleCounterBenchmark {
private static long NUMBER_OF_ITERATIONS = 10000000;
private static int NUMBER_OF_THREADS = 100;
public static void main(String[] args) throws Exception {
final AtomicLong counter = new AtomicLong();
StopWatch stopWatch = new StopWatch().start();
System.out.println("Running " + NUMBER_OF_ITERATIONS);
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
counter.incrementAndGet();
}
System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
counter.incrementAndGet();
}
latch.countDown();
}
});
}
stopWatch = new StopWatch().start();
for (Thread thread : threads) {
thread.start();
}
latch.await();
stopWatch.stop();
System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
}
}
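
With NUMBER_OF_THREADS = 100, every thread is doing compare-and-set on the same AtomicLong, so the threaded figure mostly measures contention on a single cache line. java.util.concurrent.atomic.LongAdder (JDK 8+) stripes updates across internal cells and is the usual alternative for write-heavy counters; an illustrative variant of the thread body (not in the original file):

    final LongAdder counter = new LongAdder();  // import java.util.concurrent.atomic.LongAdder
    // inside each thread, same loop shape as above:
    for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
        counter.increment();  // updates spread across cells, so contention stays low
    }
    // after latch.await(), a single sum() reconciles the cells
    long total = counter.sum();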

Deleted: org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
@@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.fs;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.unit.ByteSizeValue;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.Random;
/** Measures sequential append throughput through positional FileChannel writes. */
public class FsAppendBenchmark {
public static void main(String[] args) throws Exception {
Path path = PathUtils.get("work/test.log");
IOUtils.deleteFilesIgnoringExceptions(path);
int CHUNK = (int) ByteSizeValue.parseBytesSizeValue("1k", "CHUNK").bytes();
long DATA = ByteSizeValue.parseBytesSizeValue("10gb", "DATA").bytes();
byte[] data = new byte[CHUNK];
new Random().nextBytes(data);
StopWatch watch = new StopWatch().start("write");
try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
long position = 0;
while (position < DATA) {
channel.write(ByteBuffer.wrap(data), position);
position += data.length;
}
watch.stop().start("flush");
channel.force(true);
}
watch.stop();
System.out.println("Wrote [" + (new ByteSizeValue(DATA)) + "], chunk [" + (new ByteSizeValue(CHUNK)) + "], in " + watch);
}
private static final ByteBuffer fill = ByteBuffer.allocateDirect(1);
// public static long padLogFile(long position, long currentSize, long preAllocSize) throws IOException {
// if (position + 4096 >= currentSize) {
// currentSize = currentSize + preAllocSize;
// fill.position(0);
// f.getChannel().write(fill, currentSize - fill.remaining());
// }
// return currentSize;
// }
}

Deleted: org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
@@ -1,55 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.get;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.node.Node;
// simple test for embedded / single remote lookup
public class SimpleGetActionBenchmark {
public static void main(String[] args) {
long OPERATIONS = SizeValue.parseSizeValue("300k").singles();
Node node = new Node(Settings.EMPTY);
Client client;
if (false) { // flip to true to run the lookups through a separate client node
client = new Node(Settings.builder().put("node.client", true).build()).start().client();
} else {
client = node.client();
}
client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
StopWatch stopWatch = new StopWatch().start();
for (long i = 0; i < OPERATIONS; i++) {
client.prepareGet("test", "type1", "1").execute().actionGet();
}
stopWatch.stop();
System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));
node.close();
}
}

Deleted: org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
@@ -1,262 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.hppc;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectObjectHashMap;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.unit.SizeValue;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.concurrent.ThreadLocalRandom;
// TODO: these benchmarks aren't very good and may easily be skewed by the JIT doing
// escape analysis, side-effect elimination, or other local optimisations. Proper
// benchmarks with JMH (bulk ops, single-shot mode) would be better here; see the sketch
// after this file and
// https://github.com/carrotsearch/hppc/blob/master/hppc-benchmarks/src/main/java/com/carrotsearch/hppc/benchmarks/B003_HashSet_Contains.java
public class StringMapAdjustOrPutBenchmark {
public static void main(String[] args) {
int NUMBER_OF_KEYS = (int) SizeValue.parseSizeValue("20").singles();
int STRING_SIZE = 5;
long PUT_OPERATIONS = SizeValue.parseSizeValue("5m").singles();
long ITERATIONS = 10;
boolean REUSE = true;
String[] values = new String[NUMBER_OF_KEYS];
for (int i = 0; i < values.length; i++) {
values[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), STRING_SIZE);
}
StopWatch stopWatch;
stopWatch = new StopWatch().start();
ObjectIntHashMap<String> map = new ObjectIntHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
map.clear();
} else {
map = new ObjectIntHashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
map.clear();
map = null;
stopWatch.stop();
System.out.println("TObjectIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
stopWatch = new StopWatch().start();
// TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
ObjectIntHashMap<String> iMap = new ObjectIntHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
iMap.clear();
} else {
iMap = new ObjectIntHashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
stopWatch.stop();
System.out.println("TObjectIntCustomHashMap(StringIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
iMap.clear();
iMap = null;
stopWatch = new StopWatch().start();
iMap = new ObjectIntHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
iMap.clear();
} else {
iMap = new ObjectIntHashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
stopWatch.stop();
System.out.println("TObjectIntCustomHashMap(PureIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
iMap.clear();
iMap = null;
// now test with ObjectObjectHashMap
stopWatch = new StopWatch().start();
ObjectObjectHashMap<String, StringEntry> tMap = new ObjectObjectHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
tMap.clear();
} else {
tMap = new ObjectObjectHashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
String key = values[(int) (i % NUMBER_OF_KEYS)];
StringEntry stringEntry = tMap.get(key);
if (stringEntry == null) {
stringEntry = new StringEntry(key, 1);
tMap.put(key, stringEntry);
} else {
stringEntry.counter++;
}
}
}
tMap.clear();
tMap = null;
stopWatch.stop();
System.out.println("THashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
stopWatch = new StopWatch().start();
HashMap<String, StringEntry> hMap = new HashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
hMap.clear();
} else {
hMap = new HashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
String key = values[(int) (i % NUMBER_OF_KEYS)];
StringEntry stringEntry = hMap.get(key);
if (stringEntry == null) {
stringEntry = new StringEntry(key, 1);
hMap.put(key, stringEntry);
} else {
stringEntry.counter++;
}
}
}
hMap.clear();
hMap = null;
stopWatch.stop();
System.out.println("HashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
stopWatch = new StopWatch().start();
IdentityHashMap<String, StringEntry> ihMap = new IdentityHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
ihMap.clear();
} else {
hMap = new HashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
String key = values[(int) (i % NUMBER_OF_KEYS)];
StringEntry stringEntry = ihMap.get(key);
if (stringEntry == null) {
stringEntry = new StringEntry(key, 1);
ihMap.put(key, stringEntry);
} else {
stringEntry.counter++;
}
}
}
stopWatch.stop();
System.out.println("IdentityHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
ihMap.clear();
ihMap = null;
int[] iValues = new int[NUMBER_OF_KEYS];
for (int i = 0; i < values.length; i++) {
iValues[i] = ThreadLocalRandom.current().nextInt();
}
stopWatch = new StopWatch().start();
IntIntHashMap intMap = new IntIntHashMap();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
intMap.clear();
} else {
intMap = new IntIntHashMap();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
int key = iValues[(int) (i % NUMBER_OF_KEYS)];
intMap.addTo(key, 1);
}
}
stopWatch.stop();
System.out.println("TIntIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
intMap.clear();
intMap = null;
// now test with IntObjectHashMap
stopWatch = new StopWatch().start();
IntObjectHashMap<IntEntry> tIntMap = new IntObjectHashMap<>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
tIntMap.clear();
} else {
tIntMap = new IntObjectHashMap<>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
int key = iValues[(int) (i % NUMBER_OF_KEYS)];
IntEntry intEntry = tIntMap.get(key);
if (intEntry == null) {
intEntry = new IntEntry(key, 1);
tIntMap.put(key, intEntry);
} else {
intEntry.counter++;
}
}
}
tIntMap.clear();
tIntMap = null;
stopWatch.stop();
System.out.println("TIntObjectHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
}
static class StringEntry {
String key;
int counter;
StringEntry(String key, int counter) {
this.key = key;
this.counter = counter;
}
}
static class IntEntry {
int key;
int counter;
IntEntry(int key, int counter) {
this.key = key;
this.counter = counter;
}
}
}
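
As the TODO at the top of the file says, JMH is the better harness for this kind of micro-measurement. A minimal bulk-op, single-shot JMH version of the ObjectIntHashMap case could look like this (a sketch: the class and method names are illustrative, and it assumes the org.openjdk.jmh dependency is on the classpath):

    import java.util.concurrent.ThreadLocalRandom;
    import java.util.concurrent.TimeUnit;
    import com.carrotsearch.hppc.ObjectIntHashMap;
    import com.carrotsearch.randomizedtesting.generators.RandomStrings;
    import org.openjdk.jmh.annotations.*;

    @State(Scope.Benchmark)
    @BenchmarkMode(Mode.SingleShotTime)
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public class ObjectIntAddToBenchmark {
        private String[] keys;

        @Setup
        public void setup() {
            keys = new String[20];
            for (int i = 0; i < keys.length; i++) {
                keys[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), 5);
            }
        }

        @Benchmark
        public ObjectIntHashMap<String> addTo5M() {
            ObjectIntHashMap<String> map = new ObjectIntHashMap<>();
            for (int i = 0; i < 5_000_000; i++) {
                map.addTo(keys[i % keys.length], 1);
            }
            return map;  // return the map so the JIT cannot discard the work
        }
    }

The harness then handles forking, warmup iterations and dead-code elimination, which is exactly what the hand-rolled loops above are vulnerable to.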

Deleted: org/elasticsearch/benchmark/mapping/ManyMappingsBenchmark.java
@@ -1,151 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.mapping;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
/** Indexes documents that keep introducing new dynamic fields, stressing mapping updates. */
@SuppressForbidden(reason = "not really source code or a test")
public class ManyMappingsBenchmark {
private static final String MAPPING = "{\n" +
" \"dynamic_templates\": [\n" +
" {\n" +
" \"t1\": {\n" +
" \"mapping\": {\n" +
" \"store\": false,\n" +
" \"norms\": {\n" +
" \"enabled\": false\n" +
" },\n" +
" \"type\": \"string\"\n" +
" },\n" +
" \"match\": \"*_ss\"\n" +
" }\n" +
" },\n" +
" {\n" +
" \"t2\": {\n" +
" \"mapping\": {\n" +
" \"store\": false,\n" +
" \"type\": \"date\"\n" +
" },\n" +
" \"match\": \"*_dt\"\n" +
" }\n" +
" },\n" +
" {\n" +
" \"t3\": {\n" +
" \"mapping\": {\n" +
" \"store\": false,\n" +
" \"type\": \"integer\"\n" +
" },\n" +
" \"match\": \"*_i\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"_source\": {\n" +
" \"enabled\": false\n" +
" },\n" +
" \"properties\": {}\n" +
" }";
private static final String INDEX_NAME = "index";
private static final String TYPE_NAME = "type";
private static final int FIELD_COUNT = 100000;
private static final int DOC_COUNT = 10000000;
private static final boolean TWO_NODES = true;
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put("")
.put(SETTING_NUMBER_OF_SHARDS, 5)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", ManyMappingsBenchmark.class.getSimpleName())
.build();
Node node = new Node(settings).start();
if (TWO_NODES) {
Node node2 = new Node(settings).start();
}
Client client = node.client();
client.admin().indices().prepareDelete(INDEX_NAME)
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.get();
client.admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, MAPPING)
.get();
BulkRequestBuilder builder = client.prepareBulk();
int fieldCount = 0;
long time = System.currentTimeMillis();
final int PRINT = 1000;
for (int i = 0; i < DOC_COUNT; i++) {
XContentBuilder sourceBuilder = jsonBuilder().startObject();
sourceBuilder.field(++fieldCount + "_ss", "xyz");
sourceBuilder.field(++fieldCount + "_dt", System.currentTimeMillis());
sourceBuilder.field(++fieldCount + "_i", i % 100);
sourceBuilder.endObject();
if (fieldCount >= FIELD_COUNT) {
fieldCount = 0;
System.out.println("dynamic fields rolled up");
}
builder.add(
client.prepareIndex(INDEX_NAME, TYPE_NAME, String.valueOf(i))
.setSource(sourceBuilder)
);
if (builder.numberOfActions() >= 1000) {
builder.get();
builder = client.prepareBulk();
}
if (i % PRINT == 0) {
long took = System.currentTimeMillis() - time;
time = System.currentTimeMillis();
System.out.println("Indexed " + i + " docs, in " + TimeValue.timeValueMillis(took));
}
}
if (builder.numberOfActions() > 0) {
builder.get();
}
}
}

Deleted: org/elasticsearch/benchmark/monitor/os/OsProbeBenchmark.java
@@ -1,98 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.monitor.os;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.monitor.os.OsProbe;
@SuppressForbidden(reason = "not really source code or a test")
public class OsProbeBenchmark {
private static final int ITERATIONS = 100_000;
public static void main(String[] args) {
System.setProperty("es.logger.prefix", "");
final ESLogger logger = ESLoggerFactory.getLogger("benchmark");
logger.info("--> loading OS probe");
OsProbe probe = OsProbe.getInstance();
logger.info("--> warming up...");
for (int i = 0; i < ITERATIONS; i++) {
probe.getTotalPhysicalMemorySize();
probe.getFreePhysicalMemorySize();
probe.getTotalSwapSpaceSize();
probe.getFreeSwapSpaceSize();
probe.getSystemLoadAverage();
probe.getSystemCpuPercent();
}
logger.info("--> warmed up");
logger.info("--> testing 'getTotalPhysicalMemorySize' method...");
long start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getTotalPhysicalMemorySize();
}
long elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getFreePhysicalMemorySize' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getFreePhysicalMemorySize();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getTotalSwapSpaceSize' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getTotalSwapSpaceSize();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getFreeSwapSpaceSize' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getFreeSwapSpaceSize();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getSystemLoadAverage' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getSystemLoadAverage();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getSystemCpuPercent' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getSystemCpuPercent();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
}
}
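
The six measurement blocks are identical apart from the probed method. A helper taking a Runnable (a hypothetical refactor with the same timing logic) would collapse them; ProcessProbeBenchmark below repeats the same pattern and could share it:

    private static void time(ESLogger logger, String name, Runnable probeCall) {
        logger.info("--> testing '{}' method...", name);
        long start = System.currentTimeMillis();
        for (int i = 0; i < ITERATIONS; i++) {
            probeCall.run();
        }
        long elapsed = System.currentTimeMillis() - start;
        logger.info("--> total [{}] ms, avg [{}] ms", elapsed, elapsed / (double) ITERATIONS);
    }

    // usage: time(logger, "getSystemLoadAverage", probe::getSystemLoadAverage);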

Deleted: org/elasticsearch/benchmark/monitor/process/ProcessProbeBenchmark.java
@@ -1,131 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.monitor.process;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.monitor.process.ProcessProbe;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
@SuppressForbidden(reason = "use of om.sun.management.ThreadMXBean to compare performance")
public class ProcessProbeBenchmark {
private static final int ITERATIONS = 100_000;
public static void main(String[] args) {
System.setProperty("es.logger.prefix", "");
final ESLogger logger = ESLoggerFactory.getLogger("benchmark");
logger.info("--> loading process probe");
ProcessProbe probe = ProcessProbe.getInstance();
logger.info("--> warming up...");
for (int i = 0; i < ITERATIONS; i++) {
probe.getOpenFileDescriptorCount();
probe.getMaxFileDescriptorCount();
probe.getTotalVirtualMemorySize();
probe.getProcessCpuPercent();
probe.getProcessCpuTotalTime();
}
logger.info("--> warmed up");
logger.info("--> testing 'getOpenFileDescriptorCount' method...");
long start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getOpenFileDescriptorCount();
}
long elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getMaxFileDescriptorCount' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getMaxFileDescriptorCount();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getTotalVirtualMemorySize' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getTotalVirtualMemorySize();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getProcessCpuPercent' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getProcessCpuPercent();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> testing 'getProcessCpuTotalTime' method...");
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
probe.getProcessCpuTotalTime();
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> total [{}] ms, avg [{}] ms", elapsed, (elapsed / (double)ITERATIONS));
logger.info("--> calculating process CPU user time with 'getAllThreadIds + getThreadUserTime' methods...");
final ThreadMXBean threadMxBean = ManagementFactory.getThreadMXBean();
final long[] threadIds = threadMxBean.getAllThreadIds();
long sum = 0;
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
for (long threadId : threadIds) {
sum += threadMxBean.getThreadUserTime(threadId);
}
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> execution time [total: {} ms, avg: {} ms] for {} iterations with average result of {}",
elapsed, (elapsed / (double)ITERATIONS), ITERATIONS, (sum / (double)ITERATIONS));
if (threadMxBean instanceof com.sun.management.ThreadMXBean) {
logger.info("--> calculating process CPU user time with 'getAllThreadIds + getThreadUserTime(long[])' methods...");
final com.sun.management.ThreadMXBean threadMxBean2 = (com.sun.management.ThreadMXBean)threadMxBean;
sum = 0;
start = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; i++) {
long[] user = threadMxBean2.getThreadUserTime(threadIds);
for (int n = 0 ; n != threadIds.length; ++n) {
sum += user[n];
}
}
elapsed = System.currentTimeMillis() - start;
logger.info("--> execution time [total: {} ms, avg: {} ms] for {} iterations with average result of {}",
elapsed, (elapsed / (double)ITERATIONS), ITERATIONS, (sum / (double)ITERATIONS));
}
}
}

Deleted: org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
@@ -1,155 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.percolator;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.percolate.PercolateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;
import org.elasticsearch.percolator.PercolatorService;
import java.io.IOException;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
/** Stress-tests percolation throughput against registered term and range queries. */
public class PercolatorStressBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, 4)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = clientNode.client();
client.admin().indices().create(createIndexRequest("test")).actionGet();
ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth("test")
.setWaitForGreenStatus()
.execute().actionGet();
if (healthResponse.isTimedOut()) {
System.err.println("Quiting, because cluster health requested timed out...");
return;
} else if (healthResponse.getStatus() != ClusterHealthStatus.GREEN) {
System.err.println("Quiting, because cluster state isn't green...");
return;
}
int COUNT = 200000;
int QUERIES = 100;
int TERM_QUERIES = QUERIES / 2;
int RANGE_QUERIES = QUERIES - TERM_QUERIES;
client.prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("numeric1", 1).endObject()).execute().actionGet();
// register queries
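// half are term queries all matching name:"value", half are range queries each covering a single numeric value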
int i = 0;
for (; i < TERM_QUERIES; i++) {
client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject()
.field("query", termQuery("name", "value"))
.endObject())
.execute().actionGet();
}
int[] numbers = new int[RANGE_QUERIES];
for (; i < QUERIES; i++) {
client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
.setSource(jsonBuilder().startObject()
.field("query", rangeQuery("numeric1").from(i).to(i))
.endObject())
.execute().actionGet();
numbers[i - TERM_QUERIES] = i;
}
StopWatch stopWatch = new StopWatch().start();
System.out.println("Percolating [" + COUNT + "] ...");
for (i = 1; i <= COUNT; i++) {
XContentBuilder source;
int expectedMatches;
if (i % 2 == 0) {
source = source(Integer.toString(i), "value");
expectedMatches = TERM_QUERIES;
} else {
int number = numbers[i % RANGE_QUERIES];
source = source(Integer.toString(i), number);
expectedMatches = 1;
}
PercolateResponse percolate = client.preparePercolate()
.setIndices("test").setDocumentType("type1")
.setSource(source)
.execute().actionGet();
if (percolate.getMatches().length != expectedMatches) {
System.err.println("Unexpected number of matching queries: expected " + expectedMatches + ", got " + percolate.getMatches().length);
}
if ((i % 10000) == 0) {
System.out.println("Percolated " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("Percolation took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
clientNode.close();
for (Node node : nodes) {
node.close();
}
}
private static XContentBuilder source(String id, String nameValue) throws IOException {
return jsonBuilder().startObject().startObject("doc")
.field("id", id)
.field("name", nameValue)
.endObject().endObject();
}
private static XContentBuilder source(String id, int number) throws IOException {
return jsonBuilder().startObject().startObject("doc")
.field("id", id)
.field("numeric1", number)
.field("numeric2", number)
.field("numeric3", number)
.field("numeric4", number)
.field("numeric5", number)
.field("numeric6", number)
.field("numeric7", number)
.field("numeric8", number)
.field("numeric9", number)
.field("numeric10", number)
.endObject().endObject();
}
}

View File

@ -1,194 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.recovery;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.BackgroundIndexer;
import org.elasticsearch.transport.TransportModule;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
*/
@SuppressForbidden(reason = "not really source code or a test")
public class ReplicaRecoveryBenchmark {
private static final String INDEX_NAME = "index";
private static final String TYPE_NAME = "type";
static int DOC_COUNT = (int) SizeValue.parseSizeValue("40k").singles();
static int CONCURRENT_INDEXERS = 2;
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, "false")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(TransportModule.TRANSPORT_TYPE_KEY, "local")
.put("cluster.name", ReplicaRecoveryBenchmark.class.getSimpleName())
.build();
Node node1 = new Node(settings).start();
final ESLogger logger = ESLoggerFactory.getLogger("benchmark");
final Client client1 = node1.client();
client1.admin().cluster().prepareUpdateSettings().setPersistentSettings("logger.indices.recovery: TRACE").get();
final BackgroundIndexer indexer = new BackgroundIndexer(INDEX_NAME, TYPE_NAME, client1, 0, CONCURRENT_INDEXERS, false, new Random());
indexer.setMinFieldSize(10);
indexer.setMaxFieldSize(150);
try {
client1.admin().indices().prepareDelete(INDEX_NAME).get();
} catch (IndexNotFoundException e) {
// fine, the index may not exist yet
}
client1.admin().indices().prepareCreate(INDEX_NAME).get();
indexer.start(DOC_COUNT / 2);
while (indexer.totalIndexedDocs() < DOC_COUNT / 2) {
Thread.sleep(5000);
logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
}
client1.admin().indices().prepareFlush().get();
indexer.continueIndexing(DOC_COUNT / 2);
while (indexer.totalIndexedDocs() < DOC_COUNT) {
Thread.sleep(5000);
logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
}
logger.info("--> starting another node and allocating a shard on it");
Node node2 = new Node(settings).start();
client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1").get();
final AtomicBoolean end = new AtomicBoolean(false);
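// Background thread: every 5 seconds, log indexing throughput and active recovery stats (MB/s and translog ops/s).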
final Thread backgroundLogger = new Thread(new Runnable() {
long lastTime = System.currentTimeMillis();
long lastDocs = indexer.totalIndexedDocs();
long lastBytes = 0;
long lastTranslogOps = 0;
@Override
public void run() {
while (true) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
// interrupted by the main thread when the benchmark ends; fall through to the end check
}
if (end.get()) {
return;
}
long currentTime = System.currentTimeMillis();
long currentDocs = indexer.totalIndexedDocs();
RecoveryResponse recoveryResponse = client1.admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).get();
List<RecoveryState> indexRecoveries = recoveryResponse.shardRecoveryStates().get(INDEX_NAME);
long translogOps;
long bytes;
if (indexRecoveries.size() > 0) {
translogOps = indexRecoveries.get(0).getTranslog().recoveredOperations();
bytes = recoveryResponse.shardRecoveryStates().get(INDEX_NAME).get(0).getIndex().recoveredBytes();
} else {
bytes = lastBytes = 0;
translogOps = lastTranslogOps = 0;
}
float seconds = (currentTime - lastTime) / 1000.0F;
logger.info("--> indexed [{}];[{}] doc/s, recovered [{}] MB/s , translog ops [{}]/s ",
currentDocs, (currentDocs - lastDocs) / seconds,
(bytes - lastBytes) / 1024.0F / 1024F / seconds, (translogOps - lastTranslogOps) / seconds);
lastBytes = bytes;
lastTranslogOps = translogOps;
lastTime = currentTime;
lastDocs = currentDocs;
}
}
});
backgroundLogger.start();
client1.admin().cluster().prepareHealth().setWaitForGreenStatus().get();
logger.info("--> green. starting relocation cycles");
long startDocIndexed = indexer.totalIndexedDocs();
indexer.continueIndexing(DOC_COUNT * 50);
long totalRecoveryTime = 0;
long startTime = System.currentTimeMillis();
long[] recoveryTimes = new long[3];
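// Each cycle drops the replica, re-adds it, and measures the time until the cluster is green again, i.e. the replica recovery time.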
for (int iteration = 0; iteration < 3; iteration++) {
logger.info("--> removing replicas");
client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 0").get();
logger.info("--> adding replica again");
long recoveryStart = System.currentTimeMillis();
client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1").get();
client1.admin().cluster().prepareHealth(INDEX_NAME).setWaitForGreenStatus().setTimeout("15m").get();
long recoveryTime = System.currentTimeMillis() - recoveryStart;
totalRecoveryTime += recoveryTime;
recoveryTimes[iteration] = recoveryTime;
logger.info("--> recovery done in [{}]", new TimeValue(recoveryTime));
// sleep some to let things clean up
Thread.sleep(10000);
}
long endDocIndexed = indexer.totalIndexedDocs();
long totalTime = System.currentTimeMillis() - startTime;
indexer.stop();
end.set(true);
backgroundLogger.interrupt();
backgroundLogger.join();
logger.info("average doc/s [{}], average relocation time [{}], taking [{}], [{}], [{}]", (endDocIndexed - startDocIndexed) * 1000.0 / totalTime, new TimeValue(totalRecoveryTime / 3),
TimeValue.timeValueMillis(recoveryTimes[0]), TimeValue.timeValueMillis(recoveryTimes[1]), TimeValue.timeValueMillis(recoveryTimes[2])
);
client1.close();
node1.close();
node2.close();
}
}

View File

@ -1,50 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeScript1 extends AbstractSearchScript {
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeScript1();
}
@Override
public boolean needsScores() {
return false;
}
}
public static final String NATIVE_SCRIPT_1 = "native_1";
@Override
public Object run() {
return docFieldLongs("x").getValue();
}
}

View File

@ -1,50 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeScript2 extends AbstractSearchScript {
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeScript2();
}
@Override
public boolean needsScores() {
return false;
}
}
public static final String NATIVE_SCRIPT_2 = "native_2";
@Override
public Object run() {
return docFieldLongs("x").getValue() + docFieldDoubles("y").getValue();
}
}

View File

@ -1,50 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeScript3 extends AbstractSearchScript {
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeScript3();
}
@Override
public boolean needsScores() {
return false;
}
}
public static final String NATIVE_SCRIPT_3 = "native_3";
@Override
public Object run() {
return 1.2 * docFieldLongs("x").getValue() / docFieldDoubles("y").getValue();
}
}

View File

@ -1,50 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeScript4 extends AbstractSearchScript {
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeScript4();
}
@Override
public boolean needsScores() {
return false;
}
}
public static final String NATIVE_SCRIPT_4 = "native_4";
@Override
public Object run() {
return Math.sqrt(Math.abs(docFieldDoubles("z").getValue())) + Math.log(Math.abs(docFieldLongs("x").getValue() * docFieldDoubles("y").getValue()));
}
}

View File

@ -1,43 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptModule;
public class NativeScriptPlugin extends Plugin {
@Override
public String name() {
return "native-benchmark-scripts";
}
@Override
public String description() {
return "Native benchmark script";
}
public void onModule(ScriptModule module) {
module.registerScript(NativeScript1.NATIVE_SCRIPT_1, NativeScript1.Factory.class);
module.registerScript(NativeScript2.NATIVE_SCRIPT_2, NativeScript2.Factory.class);
module.registerScript(NativeScript3.NATIVE_SCRIPT_3, NativeScript3.Factory.class);
module.registerScript(NativeScript4.NATIVE_SCRIPT_4, NativeScript4.Factory.class);
}
}

View File

@ -1,171 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.expression;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.joda.time.PeriodType;
import java.util.Collection;
import java.util.Collections;
import java.util.Random;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
public class ScriptComparisonBenchmark {
static final String clusterName = ScriptComparisonBenchmark.class.getSimpleName();
static final String indexName = "test";
static String[] langs = {
"expression",
"native",
"groovy"
};
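// One row per benchmarked expression; the columns after the first line up with the langs array above.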
static String[][] scripts = {
// the first value is the "reference" version (pure math)
{
"x",
"doc['x'].value",
NativeScript1.NATIVE_SCRIPT_1,
"doc['x'].value"
}, {
"x + y",
"doc['x'].value + doc['y'].value",
NativeScript2.NATIVE_SCRIPT_2,
"doc['x'].value + doc['y'].value",
}, {
"1.2 * x / y",
"1.2 * doc['x'].value / doc['y'].value",
NativeScript3.NATIVE_SCRIPT_3,
"1.2 * doc['x'].value / doc['y'].value",
}, {
"sqrt(abs(z)) + ln(abs(x * y))",
"sqrt(abs(doc['z'].value)) + ln(abs(doc['x'].value * doc['y'].value))",
NativeScript4.NATIVE_SCRIPT_4,
"sqrt(abs(doc['z'].value)) + log(abs(doc['x'].value * doc['y'].value))"
}
};
public static void main(String[] args) throws Exception {
int numDocs = 1000000;
int numQueries = 1000;
Client client = setupIndex();
indexDocs(client, numDocs);
for (int scriptNum = 0; scriptNum < scripts.length; ++scriptNum) {
runBenchmark(client, scriptNum, numQueries);
}
}
static void runBenchmark(Client client, int scriptNum, int numQueries) {
System.out.println("");
System.out.println("Script: " + scripts[scriptNum][0]);
System.out.println("--------------------------------");
for (int langNum = 0; langNum < langs.length; ++langNum) {
String lang = langs[langNum];
String script = scripts[scriptNum][langNum + 1];
timeQueries(client, lang, script, numQueries / 10); // warmup
TimeValue time = timeQueries(client, lang, script, numQueries);
printResults(lang, time, numQueries);
}
}
static Client setupIndex() throws Exception {
// create cluster
Settings settings = settingsBuilder().put("name", "node1")
.put("cluster.name", clusterName).build();
Collection<Class<? extends Plugin>> plugins = Collections.<Class<? extends Plugin>>singletonList(NativeScriptPlugin.class);
Node node1 = new MockNode(settings, Version.CURRENT, plugins);
node1.start();
Client client = node1.client();
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
// delete the index, if it exists
try {
client.admin().indices().prepareDelete(indexName).execute().actionGet();
} catch (ElasticsearchException e) {
// ok if the index didn't exist
}
// create mappings
IndicesAdminClient admin = client.admin().indices();
admin.prepareCreate(indexName).addMapping("doc", "x", "type=long", "y", "type=double").get(); // execute the request; without get() the mapping was never applied
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
return client;
}
static void indexDocs(Client client, int numDocs) {
System.out.print("Indexing " + numDocs + " random docs...");
BulkRequestBuilder bulkRequest = client.prepareBulk();
Random r = new Random(1);
for (int i = 0; i < numDocs; i++) {
bulkRequest.add(client.prepareIndex("test", "doc", Integer.toString(i))
.setSource("x", r.nextInt(), "y", r.nextDouble(), "z", r.nextDouble()));
if (i % 1000 == 0) {
bulkRequest.execute().actionGet();
bulkRequest = client.prepareBulk();
}
}
bulkRequest.execute().actionGet();
client.admin().indices().prepareRefresh("test").execute().actionGet();
client.admin().indices().prepareFlush("test").execute().actionGet();
System.out.println("done");
}
static TimeValue timeQueries(Client client, String lang, String script, int numQueries) {
ScriptSortBuilder sort = SortBuilders.scriptSort(new Script(script, ScriptType.INLINE, lang, null), "number");
SearchRequestBuilder req = client.prepareSearch(indexName)
.setQuery(QueryBuilders.matchAllQuery())
.addSort(sort);
StopWatch timer = new StopWatch();
timer.start();
for (int i = 0; i < numQueries; ++i) {
req.get();
}
timer.stop();
return timer.totalTime();
}
static void printResults(String lang, TimeValue time, int numQueries) {
double avgReq = time.millis() / (double) numQueries; // avoid integer division when averaging
System.out.println(lang + ": " + time.format(PeriodType.seconds()) + " (" + avgReq + " msec per req)");
}
}

View File

@ -1,335 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.BufferedWriter;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.security.SecureRandom;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
public class BasicScriptBenchmark {
public static class RequestInfo {
public RequestInfo(SearchRequest source, int i) {
request = source;
numTerms = i;
}
SearchRequest request;
int numTerms;
}
public static class Results {
public static final String TIME_PER_DOCIN_MILLIS = "timePerDocinMillis";
public static final String NUM_TERMS = "numTerms";
public static final String NUM_DOCS = "numDocs";
public static final String TIME_PER_QUERY_IN_SEC = "timePerQueryInSec";
public static final String TOTAL_TIME_IN_SEC = "totalTimeInSec";
Double[] resultSeconds;
Double[] resultMSPerQuery;
Long[] numDocs;
Integer[] numTerms;
Double[] timePerDoc;
String label;
String description;
public String lineStyle;
public String color;
void init(int numVariations, String label, String description, String color, String lineStyle) {
resultSeconds = new Double[numVariations];
resultMSPerQuery = new Double[numVariations];
numDocs = new Long[numVariations];
numTerms = new Integer[numVariations];
timePerDoc = new Double[numVariations];
this.label = label;
this.description = description;
this.color = color;
this.lineStyle = lineStyle;
}
void set(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter, int which, int numTerms) {
resultSeconds[which] = stopWatch.lastTaskTime().getMillis() / 1000.0;
resultMSPerQuery[which] = stopWatch.lastTaskTime().secondsFrac() / maxIter; // seconds per query, despite the field name
numDocs[which] = searchResponse.getHits().totalHits();
this.numTerms[which] = numTerms;
timePerDoc[which] = resultMSPerQuery[which] / numDocs[which];
}
public void printResults(BufferedWriter writer) throws IOException {
String comma = (writer == null) ? "" : ";"; // statements written to the Octave file need a trailing semicolon
String results = description + "\n" + Results.TOTAL_TIME_IN_SEC + " = " + getResultArray(resultSeconds) + comma + "\n"
+ Results.TIME_PER_QUERY_IN_SEC + " = " + getResultArray(resultMSPerQuery) + comma + "\n" + Results.NUM_DOCS + " = "
+ getResultArray(numDocs) + comma + "\n" + Results.NUM_TERMS + " = " + getResultArray(numTerms) + comma + "\n"
+ Results.TIME_PER_DOCIN_MILLIS + " = " + getResultArray(timePerDoc) + comma + "\n";
if (writer != null) {
writer.write(results);
} else {
System.out.println(results);
}
}
private String getResultArray(Object[] resultArray) {
StringBuilder result = new StringBuilder("[");
for (int i = 0; i < resultArray.length; i++) {
result.append(resultArray[i]);
if (i != resultArray.length - 1) {
result.append(",");
}
}
return result.append("]").toString();
}
}
public BasicScriptBenchmark() {
}
static List<String> termsList = new ArrayList<>();
static void init(int numTerms) {
SecureRandom random = new SecureRandom();
random.setSeed(1);
termsList.clear();
for (int i = 0; i < numTerms; i++) {
String term = new BigInteger(512, random).toString(32);
termsList.add(term);
}
}
static String[] getTerms(int numTerms) {
String[] terms = new String[numTerms];
for (int i = 0; i < numTerms; i++) {
terms[i] = termsList.get(i);
}
return terms;
}
public static void writeHelperFunction() throws IOException {
try (BufferedWriter out = Files.newBufferedWriter(PathUtils.get("addToPlot.m"), StandardCharsets.UTF_8)) {
out.write("function handle = addToPlot(numTerms, perDoc, color, linestyle, linewidth)\n" + "handle = line(numTerms, perDoc);\n"
+ "set(handle, 'color', color);\n" + "set(handle, 'linestyle',linestyle);\n" + "set(handle, 'LineWidth',linewidth);\n"
+ "end\n");
}
}
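// Emits an Octave script that plots time per document against term count for each result set, using the addToPlot.m helper above.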
public static void printOctaveScript(List<Results> allResults, String[] args) throws IOException {
if (args.length == 0) {
return;
}
try (BufferedWriter out = Files.newBufferedWriter(PathUtils.get(args[0]), StandardCharsets.UTF_8)) {
out.write("#! /usr/local/bin/octave -qf");
out.write("\n\n\n\n");
out.write("######################################\n");
out.write("# Octave script for plotting results\n");
String filename = "scriptScoreBenchmark" + new DateTime(DateTimeZone.UTC).toString();
out.write("#Call '" + args[0] + "' from the command line. The plot is then in " + filename + "\n\n");
out.write("handleArray = [];\n tagArray = [];\n plot([]);\n hold on;\n");
for (Results result : allResults) {
out.write("\n");
out.write("# " + result.description);
result.printResults(out);
out.write("handleArray = [handleArray, addToPlot(" + Results.NUM_TERMS + ", " + Results.TIME_PER_DOCIN_MILLIS + ", '"
+ result.color + "','" + result.lineStyle + "',5)];\n");
out.write("tagArray = [tagArray; '" + result.label + "'];\n");
out.write("\n");
}
out.write("xlabel(\'number of query terms');");
out.write("ylabel(\'query time per document');");
out.write("legend(handleArray,tagArray);\n");
out.write("saveas(gcf,'" + filename + ".png','png')\n");
out.write("hold off;\n\n");
} catch (IOException e) {
System.err.println("Error: " + e.getMessage());
}
writeHelperFunction();
}
static void printResult(SearchResponse searchResponse, StopWatch stopWatch, String queryInfo) {
System.out.println("--> Searching with " + queryInfo + " took " + stopWatch.lastTaskTime() + ", per query "
+ (stopWatch.lastTaskTime().secondsFrac() / 100) + " for " + searchResponse.getHits().totalHits() + " docs");
}
static void indexData(long numDocs, Client client, boolean randomizeTerms) throws IOException {
try {
client.admin().indices().prepareDelete("test").execute().actionGet();
} catch (Throwable t) {
// the index may not exist yet, in which case there is nothing to delete
// TODO: catch a more specific exception than Throwable
}
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("text").field("type", "string").field("index_options", "offsets").field("analyzer", "payload_float")
.endObject().endObject().endObject().endObject();
client.admin()
.indices()
.prepareCreate("test")
.addMapping("type1", mapping)
.setSettings(
Settings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
.putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
.put("index.analysis.filter.delimited_float.delimiter", "|")
.put("index.analysis.filter.delimited_float.encoding", "float")
.put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
.put("index.number_of_replicas", 0).put("index.number_of_shards", 1)).execute().actionGet();
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
BulkRequestBuilder bulkRequest = client.prepareBulk();
Random random = new Random(1);
for (int i = 0; i < numDocs; i++) {
bulkRequest.add(client.prepareIndex().setType("type1").setIndex("test")
.setSource(jsonBuilder().startObject().field("text", randomText(random, randomizeTerms)).endObject()));
if (i % 1000 == 0) {
bulkRequest.execute().actionGet();
bulkRequest = client.prepareBulk();
}
}
bulkRequest.execute().actionGet();
client.admin().indices().prepareRefresh("test").execute().actionGet();
client.admin().indices().prepareFlush("test").execute().actionGet();
System.out.println("Done indexing " + numDocs + " documents");
}
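// Terms are emitted with a "|1" suffix so the delimited_float filter (delimiter "|", encoding "float") indexes a payload of 1.0 per term.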
private static String randomText(Random random, boolean randomizeTerms) {
StringBuilder text = new StringBuilder();
for (int i = 0; i < termsList.size(); i++) {
if (random.nextInt(5) == 3 || !randomizeTerms) {
text.append(' ').append(termsList.get(i)).append("|1");
}
}
return text.toString();
}
static void printTimings(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter) {
System.out.println(message);
System.out.println(stopWatch.lastTaskTime() + ", " + (stopWatch.lastTaskTime().secondsFrac() / maxIter) + ", "
+ searchResponse.getHits().totalHits() + ", "
+ (stopWatch.lastTaskTime().secondsFrac() / (maxIter * searchResponse.getHits().totalHits()))); // time per doc: divide by iterations times hits, not their sum
}
static List<Entry<String, RequestInfo>> initTermQueries(int minTerms, int maxTerms) {
List<Entry<String, RequestInfo>> termSearchRequests = new ArrayList<>();
for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
Map<String, Object> params = new HashMap<>();
String[] terms = getTerms(nTerms + 1);
params.put("text", terms);
SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
searchSource().explain(false).size(0).query(QueryBuilders.termsQuery("text", terms)));
String infoString = "Results for term query with " + (nTerms + 1) + " terms:";
termSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, nTerms + 1)));
}
return termSearchRequests;
}
static List<Entry<String, RequestInfo>> initNativeSearchRequests(int minTerms, int maxTerms, String script, boolean langNative) {
List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<>();
for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
Map<String, Object> params = new HashMap<>();
String[] terms = getTerms(nTerms + 1);
params.put("text", terms);
String infoString = "Results for native script with " + (nTerms + 1) + " terms:";
ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(new Script(script, ScriptType.INLINE, "native", params))
: scriptFunction(new Script(script, ScriptType.INLINE, null, params));
SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
searchSource()
.explain(false)
.size(0)
.query(functionScoreQuery(QueryBuilders.termsQuery("text", terms), scriptFunction).boostMode(
CombineFunction.REPLACE)));
nativeSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, nTerms + 1)));
}
return nativeSearchRequests;
}
static List<Entry<String, RequestInfo>> initScriptMatchAllSearchRequests(String script, boolean langNative) {
List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<>();
String infoString = "Results for constant score script:";
ScriptScoreFunctionBuilder scriptFunction = langNative ? scriptFunction(new Script(script, ScriptType.INLINE, "native", null))
: scriptFunction(new Script(script));
SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
searchSource().explain(false).size(0)
.query(functionScoreQuery(QueryBuilders.matchAllQuery(), scriptFunction).boostMode(CombineFunction.REPLACE)));
nativeSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, 0)));
return nativeSearchRequests;
}
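// Runs warmerIter unmeasured warm-up searches per request, then times maxIter searches and records the result for each entry.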
static void runBenchmark(Client client, int maxIter, Results results, List<Entry<String, RequestInfo>> nativeSearchRequests,
int minTerms, int warmerIter) throws IOException {
int counter = 0;
for (Entry<String, RequestInfo> entry : nativeSearchRequests) {
SearchResponse searchResponse = null;
// warm up
for (int i = 0; i < warmerIter; i++) {
searchResponse = client.search(entry.getValue().request).actionGet();
}
System.gc();
// run benchmark
StopWatch stopWatch = new StopWatch();
stopWatch.start();
for (int i = 0; i < maxIter; i++) {
searchResponse = client.search(entry.getValue().request).actionGet();
}
stopWatch.stop();
results.set(searchResponse, stopWatch, entry.getKey(), maxIter, counter, entry.getValue().numTerms);
counter++;
}
results.printResults(null);
}
}

View File

@ -1,108 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score;
import org.elasticsearch.Version;
import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map.Entry;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
*/
public class ScriptsConstantScoreBenchmark extends BasicScriptBenchmark {
public static void main(String[] args) throws Exception {
int minTerms = 49;
int maxTerms = 50;
int maxIter = 1000;
int warmerIter = 1000;
init(maxTerms);
List<Results> allResults = new ArrayList<>();
String clusterName = ScriptsConstantScoreBenchmark.class.getSimpleName();
Settings settings = settingsBuilder().put("name", "node1")
.put("cluster.name", clusterName).build();
Collection<Class<? extends Plugin>> plugins = Collections.<Class<? extends Plugin>>singletonList(NativeScriptExamplesPlugin.class);
Node node1 = new MockNode(settings, Version.CURRENT, plugins);
node1.start();
Client client = node1.client();
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
indexData(10000, client, true);
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
Results results = new Results();
results.init(maxTerms - minTerms, "native const script score (log(2) 10X)",
"Results for native const script score with score = log(2) 10X:", "black", "-.");
// init script searches
List<Entry<String, RequestInfo>> searchRequests = initScriptMatchAllSearchRequests(
NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
// init native script searches
results = new Results();
results.init(maxTerms - minTerms, "mvel const (log(2) 10X)", "Results for mvel const score = log(2) 10X:", "red", "-.");
searchRequests = initScriptMatchAllSearchRequests("score = 0; for (int i=0; i<10;i++) {score = score + log(2);} return score",
false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "native const script score (2)", "Results for native const script score with score = 2:",
"black", ":");
// init native script searches
searchRequests = initScriptMatchAllSearchRequests(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "mvel const (2)", "Results for mvel const score = 2:", "red", "--");
// init native script searches
searchRequests = initScriptMatchAllSearchRequests("2", false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
printOctaveScript(allResults, args);
client.close();
node1.close();
}
}

View File

@ -1,142 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score;
import org.elasticsearch.Version;
import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map.Entry;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
*/
public class ScriptsScoreBenchmark extends BasicScriptBenchmark {
public static void main(String[] args) throws Exception {
int minTerms = 1;
int maxTerms = 50;
int maxIter = 100;
int warmerIter = 10;
boolean runMVEL = false;
init(maxTerms);
List<Results> allResults = new ArrayList<>();
String clusterName = ScriptsScoreBenchmark.class.getSimpleName();
Settings settings = settingsBuilder().put("name", "node1")
.put("cluster.name", clusterName).build();
Collection<Class<? extends Plugin>> plugins = Collections.<Class<? extends Plugin>>singletonList(NativeScriptExamplesPlugin.class);
Node node1 = new MockNode(settings, Version.CURRENT, plugins);
node1.start();
Client client = node1.client();
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
indexData(10000, client, false);
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
Results results = new Results();
results.init(maxTerms - minTerms, "native tfidf script score dense posting list",
"Results for native script score with dense posting list:", "black", "--");
// init native script searches
List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "term query dense posting list", "Results for term query with dense posting lists:", "green",
"--");
// init term queries
searchRequests = initTermQueries(minTerms, maxTerms);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
if (runMVEL) {
results = new Results();
results.init(maxTerms - minTerms, "mvel tfidf dense posting list", "Results for mvel score with dense posting list:", "red",
"--");
// init native script searches
searchRequests = initNativeSearchRequests(
minTerms,
maxTerms,
"score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
}
indexData(10000, client, true);
results = new Results();
results.init(maxTerms - minTerms, "native tfidf script score sparse posting list",
"Results for native script scorewith sparse posting list:", "black", "-.");
// init native script searches
searchRequests = initNativeSearchRequests(minTerms, maxTerms, NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
results.init(maxTerms - minTerms, "term query sparse posting list", "Results for term query with sparse posting lists:", "green",
"-.");
// init term queries
searchRequests = initTermQueries(minTerms, maxTerms);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
if (runMVEL) {
results = new Results();
results.init(maxTerms - minTerms, "mvel tfidf sparse posting list", "Results for mvel score with sparse posting list:", "red",
"-.");
// init native script searches
searchRequests = initNativeSearchRequests(
minTerms,
maxTerms,
"score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
false);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
}
printOctaveScript(allResults, args);
client.close();
node1.close();
}
}

View File

@ -1,89 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score;
import org.elasticsearch.Version;
import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map.Entry;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
*
*/
public class ScriptsScorePayloadSumBenchmark extends BasicScriptBenchmark {
public static void main(String[] args) throws Exception {
int minTerms = 1;
int maxTerms = 50;
int maxIter = 100;
int warmerIter = 10;
init(maxTerms);
List<Results> allResults = new ArrayList<>();
String clusterName = ScriptsScorePayloadSumBenchmark.class.getSimpleName();
Settings settings = settingsBuilder().put("name", "node1")
.put("cluster.name", clusterName).build();
Collection<Class<? extends Plugin>> plugins = Collections.<Class<? extends Plugin>>singletonList(NativeScriptExamplesPlugin.class);
Node node1 = new MockNode(settings, Version.CURRENT, plugins);
node1.start();
Client client = node1.client();
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
indexData(10000, client, false);
client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
Results results = new Results();
// init script searches
results.init(maxTerms - minTerms, "native payload sum script score", "Results for native script score:", "green", ":");
List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
results = new Results();
// init script searches
results.init(maxTerms - minTerms, "native payload sum script score no record", "Results for native script score:", "black", ":");
searchRequests = initNativeSearchRequests(minTerms, maxTerms,
NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, true);
// run actual benchmark
runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
allResults.add(results);
printOctaveScript(allResults, args);
client.close();
node1.close();
}
}

View File

@ -1,49 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.plugin;
import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript;
import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptModule;
public class NativeScriptExamplesPlugin extends Plugin {
@Override
public String name() {
return "native-script-example";
}
@Override
public String description() {
return "Native script examples";
}
public void onModule(ScriptModule module) {
module.registerScript(NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, NativeNaiveTFIDFScoreScript.Factory.class);
module.registerScript(NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, NativeConstantForLoopScoreScript.Factory.class);
module.registerScript(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, NativeConstantScoreScript.Factory.class);
module.registerScript(NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, NativePayloadSumScoreScript.Factory.class);
module.registerScript(NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, NativePayloadSumNoRecordScoreScript.Factory.class);
}
}

View File

@ -1,59 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.script;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeConstantForLoopScoreScript extends AbstractSearchScript {
public static final String NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE = "native_constant_for_loop_script_score";
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeConstantForLoopScoreScript(params);
}
@Override
public boolean needsScores() {
return false;
}
}
private NativeConstantForLoopScoreScript(Map<String, Object> params) {
}
@Override
public Object run() {
float score = 0;
for (int i = 0; i < 10; i++) {
score += Math.log(2);
}
return score;
}
}

View File

@ -1,54 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.script;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.Map;
public class NativeConstantScoreScript extends AbstractSearchScript {
public static final String NATIVE_CONSTANT_SCRIPT_SCORE = "native_constant_script_score";
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeConstantScoreScript();
}
@Override
public boolean needsScores() {
return false;
}
}
private NativeConstantScoreScript() {
}
@Override
public Object run() {
return 2;
}
}

View File

@ -1,79 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.script;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import org.elasticsearch.search.lookup.IndexFieldTerm;
import org.elasticsearch.search.lookup.IndexField;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;
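/**
 * Scores documents with a naive tf-idf: for each requested term it adds
 * tf * docCount / df, with no log damping or normalization. The params map
 * holds a single entry: field name -> list of terms to score.
 */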
public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
public static final String NATIVE_NAIVE_TFIDF_SCRIPT_SCORE = "native_naive_tfidf_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativeNaiveTFIDFScoreScript(params);
}
@Override
public boolean needsScores() {
return false;
}
}
private NativeNaiveTFIDFScoreScript(Map<String, Object> params) {
// the params map holds a single entry: field name -> list of terms
field = params.keySet().iterator().next();
ArrayList<String> termList = (ArrayList<String>) params.get(field);
terms = termList.toArray(new String[termList.size()]);
}
@Override
public Object run() {
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i]);
try {
if (indexFieldTerm.tf() != 0) {
score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return score;
}
}


@@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.script;
import org.elasticsearch.search.lookup.IndexFieldTerm;
import org.elasticsearch.search.lookup.IndexField;
import org.elasticsearch.search.lookup.IndexLookup;
import org.elasticsearch.search.lookup.TermPosition;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.ArrayList;
import java.util.Map;
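/**
 * Sums term payloads for the requested terms without caching positions
 * (FLAG_PAYLOADS only, no FLAG_CACHE), for comparison with the caching
 * variant of this script.
 */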
public class NativePayloadSumNoRecordScoreScript extends AbstractSearchScript {
public static final String NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE = "native_payload_sum_no_record_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativePayloadSumNoRecordScoreScript(params);
}
@Override
public boolean needsScores() {
return false;
}
}
private NativePayloadSumNoRecordScoreScript(Map<String, Object> params) {
// the params map holds a single entry: field name -> list of terms
field = params.keySet().iterator().next();
ArrayList<String> termList = (ArrayList<String>) params.get(field);
terms = termList.toArray(new String[termList.size()]);
}
@Override
public Object run() {
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS);
for (TermPosition pos : indexFieldTerm) {
score += pos.payloadAsFloat(0);
}
}
return score;
}
}


@@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.scripts.score.script;
import org.elasticsearch.search.lookup.IndexFieldTerm;
import org.elasticsearch.search.lookup.IndexField;
import org.elasticsearch.search.lookup.IndexLookup;
import org.elasticsearch.search.lookup.TermPosition;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.script.AbstractSearchScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.NativeScriptFactory;
import java.util.ArrayList;
import java.util.Map;
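/**
 * Sums term payloads for the requested terms with position caching enabled
 * (FLAG_PAYLOADS | FLAG_CACHE), for comparison with the no-record variant.
 */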
public class NativePayloadSumScoreScript extends AbstractSearchScript {
public static final String NATIVE_PAYLOAD_SUM_SCRIPT_SCORE = "native_payload_sum_script_score";
String field = null;
String[] terms = null;
public static class Factory implements NativeScriptFactory {
@Override
public ExecutableScript newScript(@Nullable Map<String, Object> params) {
return new NativePayloadSumScoreScript(params);
}
@Override
public boolean needsScores() {
return false;
}
}
private NativePayloadSumScoreScript(Map<String, Object> params) {
// the params map holds a single entry: field name -> list of terms
field = params.keySet().iterator().next();
ArrayList<String> termList = (ArrayList<String>) params.get(field);
terms = termList.toArray(new String[termList.size()]);
}
@Override
public Object run() {
float score = 0;
IndexField indexField = indexLookup().get(field);
for (int i = 0; i < terms.length; i++) {
IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS | IndexLookup.FLAG_CACHE);
for (TermPosition pos : indexFieldTerm) {
score += pos.payloadAsFloat(0);
}
}
return score;
}
}


@@ -1,172 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import java.io.IOException;
import java.util.List;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
/**
 * Benchmarks term suggestions: indexes documents with single-term values
 * ("prefix" + letter + counter) and measures the average time of suggest
 * requests over repeated searches.
 */
public class SuggestSearchBenchMark {
public static void main(String[] args) throws Exception {
int SEARCH_ITERS = 200;
Settings settings = settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Client client = nodes[0].client();
try {
client.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("_source").field("enabled", false).endObject()
.startObject("_all").field("enabled", false).endObject()
.startObject("_type").field("index", "no").endObject()
.startObject("_id").field("index", "no").endObject()
.startObject("properties")
.startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
.endObject()
.endObject().endObject()).execute().actionGet();
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
StopWatch stopWatch = new StopWatch().start();
long COUNT = SizeValue.parseSizeValue("10m").singles();
int BATCH = 100;
System.out.println("Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
char character = 'a';
int idCounter = 0;
for (; i <= ITERS; i++) {
int termCounter = 0;
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(idCounter++)).source(source("prefix" + character + termCounter++)));
}
character++;
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("failures...");
}
}
System.out.println("Indexing took " + stopWatch.totalTime());
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("Count: " + client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("Count: " + client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
}
System.out.println("Warming up...");
char startChar = 'a';
for (int i = 0; i <= 20; i++) {
String term = "prefix" + startChar;
SearchResponse response = client.prepareSearch()
.setQuery(prefixQuery("field", term))
.suggest(
new SuggestBuilder().addSuggestion(SuggestBuilders.termSuggestion("field").field("field").text(term)
.suggestMode("always")))
.execute().actionGet();
if (response.getHits().totalHits() == 0) {
System.err.println("No hits");
continue;
}
startChar++;
}
System.out.println("Starting benchmarking suggestions.");
startChar = 'a';
long timeTaken = 0;
for (int i = 0; i < SEARCH_ITERS; i++) { // exactly SEARCH_ITERS runs so the average below is correct
String term = "prefix" + startChar;
SearchResponse response = client.prepareSearch()
.setQuery(matchQuery("field", term))
.suggest(
new SuggestBuilder().addSuggestion(SuggestBuilders.termSuggestion("field").text(term).field("field")
.suggestMode("always")))
.execute().actionGet();
timeTaken += response.getTookInMillis();
if (response.getSuggest() == null) {
System.err.println("No suggestions");
continue;
}
List<? extends Option> options = response.getSuggest().getSuggestion("field").getEntries().get(0).getOptions();
if (options == null || options.isEmpty()) {
System.err.println("No suggestions");
}
startChar++;
}
System.out.println("Avg time taken without filter " + (timeTaken / SEARCH_ITERS));
client.close();
for (Node node : nodes) {
node.close();
}
}
private static XContentBuilder source(String nameValue) throws IOException {
return jsonBuilder().startObject()
.field("field", nameValue)
.endObject();
}
}


@@ -1,156 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
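/**
 * Benchmarks the cardinality aggregation on low- and high-cardinality string
 * and numeric fields, including murmur3-hashed sub-fields.
 */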
public class CardinalityAggregationSearchBenchmark {
private static final Random R = new Random();
private static final String CLUSTER_NAME = CardinalityAggregationSearchBenchmark.class.getSimpleName();
private static final int NUM_DOCS = 10000000;
private static final int LOW_CARD = 1000;
private static final int HIGH_CARD = 1000000;
private static final int BATCH = 100;
private static final int WARM = 5;
private static final int RUNS = 10;
private static final int ITERS = 5;
public static void main(String[] args) {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 5)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", CLUSTER_NAME)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = clientNode.client();
try {
client.admin().indices().create(createIndexRequest("index").settings(settings).mapping("type",
jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("low_card_str_value")
.field("type", "multi_field")
.startObject("fields")
.startObject("low_card_str_value")
.field("type", "string")
.endObject()
.startObject("hash")
.field("type", "murmur3")
.endObject()
.endObject()
.endObject()
.startObject("high_card_str_value")
.field("type", "multi_field")
.startObject("fields")
.startObject("high_card_str_value")
.field("type", "string")
.endObject()
.startObject("hash")
.field("type", "murmur3")
.endObject()
.endObject()
.endObject()
.startObject("low_card_num_value")
.field("type", "long")
.endObject()
.startObject("high_card_num_value")
.field("type", "long")
.endObject()
.endObject().endObject().endObject())).actionGet();
System.out.println("Indexing " + NUM_DOCS + " documents");
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUM_DOCS; ) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
final int lowCard = RandomInts.randomInt(R, LOW_CARD);
final int highCard = RandomInts.randomInt(R, HIGH_CARD);
request.add(client.prepareIndex("index", "type", Integer.toString(i)).setSource("low_card_str_value", "str" + lowCard, "high_card_str_value", "str" + highCard, "low_card_num_value", lowCard , "high_card_num_value", highCard));
++i;
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
System.err.println(response.buildFailureMessage());
}
if ((i % 100000) == 0) {
System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
client.admin().indices().prepareRefresh("index").execute().actionGet();
} catch (Exception e) {
System.out.println("Index already exists, skipping index creation");
}
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
for (int i = 0; i < WARM + RUNS; ++i) {
if (i >= WARM) {
System.out.println("RUN " + (i - WARM));
}
for (String field : new String[] {"low_card_str_value", "low_card_str_value.hash", "high_card_str_value", "high_card_str_value.hash", "low_card_num_value", "high_card_num_value"}) {
long start = System.nanoTime();
SearchResponse resp = null;
for (int j = 0; j < ITERS; ++j) {
resp = client.prepareSearch("index").setSize(0).addAggregation(cardinality("cardinality").field(field)).execute().actionGet();
}
long end = System.nanoTime();
final long cardinality = ((Cardinality) resp.getAggregations().get("cardinality")).getValue();
if (i >= WARM) {
System.out.println(field + "\t" + new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS) + "\tcardinality=" + cardinality);
}
}
}
}
}


@@ -1,248 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.benchmark.search.aggregations.TermsAggregationSearchBenchmark.StatsResult;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.transport.TransportModule;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
 * Benchmarks terms aggregations using global ordinals versus per-segment
 * ordinals across fields of increasing cardinality (field_1 .. field_4194304).
 */
@SuppressForbidden(reason = "not really source code or a test")
public class GlobalOrdinalsBenchmark {
private static final String INDEX_NAME = "index";
private static final String TYPE_NAME = "type";
private static final int QUERY_WARMUP = 25;
private static final int QUERY_COUNT = 100;
private static final int FIELD_START = 1;
private static final int FIELD_LIMIT = 1 << 22;
private static final boolean USE_DOC_VALUES = false;
static long COUNT = SizeValue.parseSizeValue("5m").singles();
static Node node;
static Client client;
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put(TransportModule.TRANSPORT_TYPE_KEY, "local")
.put("cluster.name", GlobalOrdinalsBenchmark.class.getSimpleName())
.build();
node = new Node(settings).start();
client = node.client();
try {
client.admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, jsonBuilder().startObject().startObject(TYPE_NAME)
.startArray("dynamic_templates")
.startObject()
.startObject("default")
.field("match", "*")
.field("match_mapping_type", "string")
.startObject("mapping")
.field("type", "string")
.field("index", "not_analyzed")
.startObject("fields")
.startObject("doc_values")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endArray()
.endObject().endObject())
.get();
ObjectHashSet<String> uniqueTerms = new ObjectHashSet<>();
for (int i = 0; i < FIELD_LIMIT; i++) {
boolean added;
do {
added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, 16));
} while (!added);
}
String[] sValues = uniqueTerms.toArray(String.class);
uniqueTerms = null;
BulkRequestBuilder builder = client.prepareBulk();
IntIntHashMap tracker = new IntIntHashMap();
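// tracker counts how many distinct terms each field has used so far:
// field_N receives sValues[0..N-1] for the first N documents and random
// reuse afterwards, so field_N ends up with a cardinality of roughly N.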
for (int i = 0; i < COUNT; i++) {
Map<String, Object> fieldValues = new HashMap<>();
for (int fieldSuffix = 1; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
int index = tracker.putOrAdd(fieldSuffix, 0, 0);
if (index >= fieldSuffix) {
index = random.nextInt(fieldSuffix);
fieldValues.put("field_" + fieldSuffix, sValues[index]);
} else {
fieldValues.put("field_" + fieldSuffix, sValues[index]);
tracker.put(fieldSuffix, ++index);
}
}
builder.add(
client.prepareIndex(INDEX_NAME, TYPE_NAME, String.valueOf(i))
.setSource(fieldValues)
);
if (builder.numberOfActions() >= 1000) {
builder.get();
builder = client.prepareBulk();
}
}
if (builder.numberOfActions() > 0) {
builder.get();
}
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("logger.index.fielddata.ordinals", "DEBUG"))
.get();
client.admin().indices().prepareRefresh(INDEX_NAME).execute().actionGet();
COUNT = client.prepareSearch(INDEX_NAME).setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
System.out.println("--> Number of docs in index: " + COUNT);
List<StatsResult> stats = new ArrayList<>();
for (int fieldSuffix = FIELD_START; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
String fieldName = "field_" + fieldSuffix;
String name = "global_ordinals-" + fieldName;
if (USE_DOC_VALUES) {
fieldName = fieldName + ".doc_values";
name = name + "_doc_values"; // can't have . in agg name
}
stats.add(terms(name, fieldName, "global_ordinals_low_cardinality"));
}
for (int fieldSuffix = FIELD_START; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
String fieldName = "field_" + fieldSuffix;
String name = "ordinals-" + fieldName;
if (USE_DOC_VALUES) {
fieldName = fieldName + ".doc_values";
name = name + "_doc_values"; // can't have . in agg name
}
stats.add(terms(name, fieldName, "ordinals"));
}
System.out.println("------------------ SUMMARY -----------------------------------------");
System.out.format(Locale.ENGLISH, "%30s%10s%10s%15s\n", "name", "took", "millis", "fielddata size");
for (StatsResult stat : stats) {
System.out.format(Locale.ENGLISH, "%30s%10s%10d%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed);
}
System.out.println("------------------ SUMMARY -----------------------------------------");
client.close();
node.close();
}
private static StatsResult terms(String name, String field, String executionHint) {
long totalQueryTime;
client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
System.gc();
System.out.println("--> Warmup (" + name + ")...");
// run just the child query, warm up first
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(INDEX_NAME)
.setSize(0)
.setQuery(matchAllQuery())
.addAggregation(AggregationBuilders.terms(name).field(field).executionHint(executionHint))
.get();
if (j == 0) {
System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup (" + name + ") DONE");
System.out.println("--> Running (" + name + ")...");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(INDEX_NAME)
.setSize(0)
.setQuery(matchAllQuery())
.addAggregation(AggregationBuilders.terms(name).field(field).executionHint(executionHint))
.get();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
String nodeId = node.injector().getInstance(Discovery.class).localNode().getId();
ClusterStatsResponse clusterStateResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeId).get();
System.out.println("--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
ByteSizeValue fieldDataMemoryUsed = clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed);
}
}


@@ -1,152 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeUnit;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesMethod;
import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
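/**
 * Compares the TDIGEST and HDR percentiles implementations on uniform
 * low/high-cardinality fields and a gaussian latency-like field, reporting
 * per-run aggregation time and estimated memory footprint.
 */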
public class HDRPercentilesAggregationBenchmark {
private static final String TYPE_NAME = "type";
private static final String INDEX_NAME = "index";
private static final String HIGH_CARD_FIELD_NAME = "high_card";
private static final String LOW_CARD_FIELD_NAME = "low_card";
private static final String GAUSSIAN_FIELD_NAME = "gauss";
private static final Random R = new Random();
private static final String CLUSTER_NAME = HDRPercentilesAggregationBenchmark.class.getSimpleName();
private static final int NUM_DOCS = 10000000;
private static final int LOW_CARD = 1000;
private static final int HIGH_CARD = 1000000;
private static final int BATCH = 100;
private static final int WARM = 5;
private static final int RUNS = 10;
private static final int ITERS = 5;
public static void main(String[] args) {
long overallStartTime = System.currentTimeMillis();
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 5)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", CLUSTER_NAME)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = clientNode.client();
try {
client.admin().indices().prepareCreate(INDEX_NAME).get(); // execute the request; the builder alone never sends it
System.out.println("Indexing " + NUM_DOCS + " documents");
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUM_DOCS; ) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
final int lowCard = RandomInts.randomInt(R, LOW_CARD);
final int highCard = RandomInts.randomInt(R, HIGH_CARD);
int gauss = -1;
while (gauss < 0) {
gauss = (int) (R.nextGaussian() * 1000) + 5000; // mean: 5 sec, std deviation: 1 sec
}
request.add(client.prepareIndex(INDEX_NAME, TYPE_NAME, Integer.toString(i)).setSource(LOW_CARD_FIELD_NAME, lowCard,
HIGH_CARD_FIELD_NAME, highCard, GAUSSIAN_FIELD_NAME, gauss));
++i;
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
System.err.println(response.buildFailureMessage());
}
if ((i % 100000) == 0) {
System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
client.admin().indices().prepareRefresh(INDEX_NAME).execute().actionGet();
} catch (Exception e) {
System.out.println("Index already exists, skipping index creation");
}
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
System.out.println("Run\tField\tMethod\tAggregationTime\tEstimatedMemory");
for (int i = 0; i < WARM + RUNS; ++i) {
for (String field : new String[] { LOW_CARD_FIELD_NAME, HIGH_CARD_FIELD_NAME, GAUSSIAN_FIELD_NAME }) {
for (PercentilesMethod method : new PercentilesMethod[] {PercentilesMethod.TDIGEST, PercentilesMethod.HDR}) {
long start = System.nanoTime();
SearchResponse resp = null;
for (int j = 0; j < ITERS; ++j) {
resp = client.prepareSearch(INDEX_NAME).setSize(0).addAggregation(percentiles("percentiles").field(field).method(method)).execute().actionGet();
}
long end = System.nanoTime();
long memoryEstimate = 0;
switch (method) {
case TDIGEST:
memoryEstimate = ((InternalTDigestPercentiles) resp.getAggregations().get("percentiles"))
.getEstimatedMemoryFootprint();
break;
case HDR:
memoryEstimate = ((InternalHDRPercentiles) resp.getAggregations().get("percentiles")).getEstimatedMemoryFootprint();
break;
}
if (i >= WARM) {
System.out.println((i - WARM) + "\t" + field + "\t" + method + "\t"
+ new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS).millis() + "\t"
+ new SizeValue(memoryEstimate, SizeUnit.SINGLE).singles());
}
}
}
}
long overallEndTime = System.currentTimeMillis();
System.out.println("Benchmark completed in " + ((overallEndTime - overallStartTime) / 1000) + " seconds");
}
}


@@ -1,222 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;
import java.util.Date;
import java.util.Random;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
/**
 * Benchmarks numeric and date histogram aggregations, with and without stats
 * sub-aggregations, across long/integer/short/byte fields.
 */
public class HistogramAggregationSearchBenchmark {
static final long COUNT = SizeValue.parseSizeValue("20m").singles();
static final int BATCH = 1000;
static final int QUERY_WARMUP = 5;
static final int QUERY_COUNT = 20;
static final int NUMBER_OF_TERMS = 1000;
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", HistogramAggregationSearchBenchmark.class.getSimpleName())
.build();
Node node1 = new Node(settingsBuilder().put(settings).put("name", "node1").build()).start();
//Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = node1.client();
long[] lValues = new long[NUMBER_OF_TERMS];
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
lValues[i] = i;
}
Random r = new Random();
try {
client.admin().indices().prepareCreate("test")
.setSettings(settingsBuilder().put(settings))
.addMapping("type1", jsonBuilder()
.startObject()
.startObject("type1")
.startObject("properties")
.startObject("l_value")
.field("type", "long")
.endObject()
.startObject("i_value")
.field("type", "integer")
.endObject()
.startObject("s_value")
.field("type", "short")
.endObject()
.startObject("b_value")
.field("type", "byte")
.endObject()
.endObject()
.endObject()
.endObject())
.execute().actionGet();
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long iters = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= iters; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
final long value = lValues[r.nextInt(lValues.length)];
XContentBuilder source = jsonBuilder().startObject()
.field("id", Integer.valueOf(counter))
.field("l_value", value)
.field("i_value", (int) value)
.field("s_value", (short) value)
.field("b_value", (byte) value)
.field("date", new Date())
.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(source));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
client.admin().indices().prepareFlush("test").execute().actionGet();
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
if (client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits() != COUNT) {
throw new Error("document count mismatch: expected " + COUNT + " docs");
}
System.out.println("--> Number of docs in index: " + COUNT);
System.out.println("--> Warmup...");
// run just the child query, warm up first
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(histogram("l_value").field("l_value").interval(4))
.addAggregation(histogram("i_value").field("i_value").interval(4))
.addAggregation(histogram("s_value").field("s_value").interval(4))
.addAggregation(histogram("b_value").field("b_value").interval(4))
.addAggregation(histogram("date").field("date").interval(1000))
.execute().actionGet();
if (j == 0) {
System.out.println("--> Warmup took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup DONE");
long totalQueryTime = 0;
for (String field : new String[] {"b_value", "s_value", "i_value", "l_value"}) {
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(histogram(field).field(field).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(histogram(field).field(field).subAggregation(stats(field).field(field)).interval(4))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(dateHistogram("date").field("date").interval(1000))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addAggregation(dateHistogram("date").field("date").interval(1000).subAggregation(stats("stats").field("l_value")))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram Aggregation (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
node1.close();
}
}


@@ -1,125 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
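/**
 * Benchmarks a terms aggregation with an include regex ("s.*") on a
 * not_analyzed string field, combined with a prefix query.
 */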
public class IncludeExcludeAggregationSearchBenchmark {
private static final Random R = new Random();
private static final String CLUSTER_NAME = IncludeExcludeAggregationSearchBenchmark.class.getSimpleName();
private static final int NUM_DOCS = 10000000;
private static final int BATCH = 100;
private static final int WARM = 3;
private static final int RUNS = 10;
private static final int ITERS = 3;
public static void main(String[] args) {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", CLUSTER_NAME)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = clientNode.client();
try {
client.admin().indices().create(createIndexRequest("index").settings(settings).mapping("type",
jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("str")
.field("type", "string")
.field("index", "not_analyzed")
.endObject()
.endObject().endObject().endObject())).actionGet();
System.out.println("Indexing " + NUM_DOCS + " documents");
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUM_DOCS; ) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
request.add(client.prepareIndex("index", "type", Integer.toString(i)).setSource("str", TestUtil.randomSimpleString(R)));
++i;
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
System.err.println(response.buildFailureMessage());
}
if ((i % 100000) == 0) {
System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
client.admin().indices().prepareRefresh("index").execute().actionGet();
} catch (Exception e) {
System.out.println("Index already exists, skipping index creation");
}
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
for (int i = 0; i < WARM + RUNS; ++i) {
if (i >= WARM) {
System.out.println("RUN " + (i - WARM));
}
long start = System.nanoTime();
SearchResponse resp = null;
for (int j = 0; j < ITERS; ++j) {
resp = client.prepareSearch("index").setQuery(QueryBuilders.prefixQuery("str", "sf")).setSize(0).addAggregation(terms("t").field("str").include("s.*")).execute().actionGet();
}
long end = System.nanoTime();
if (i >= WARM) {
System.out.println(new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS));
}
}
}
}


@@ -1,208 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.client.Requests.getRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
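/**
 * Measures both the accuracy (sum of squared errors against exact
 * percentiles) and the speed of the percentiles aggregation across several
 * value distributions, using 100 shards to stress the reduce phase.
 */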
public class PercentilesAggregationSearchBenchmark {
private static final int AMPLITUDE = 10000;
private static final int NUM_DOCS = (int) SizeValue.parseSizeValue("1m").singles();
private static final int BATCH = 100;
private static final String CLUSTER_NAME = PercentilesAggregationSearchBenchmark.class.getSimpleName();
private static final double[] PERCENTILES = new double[] { 0, 0.01, 0.1, 1, 10, 25, 50, 75, 90, 99, 99.9, 99.99, 100};
private static final int QUERY_WARMUP = 10;
private static final int QUERY_COUNT = 20;
private static Random R = new Random(0);
// we generate ints to not disadvantage qdigest which only works with integers
private enum Distribution {
UNIFORM {
@Override
int next() {
return (int) (R.nextDouble() * AMPLITUDE);
}
},
GAUSS {
@Override
int next() {
// the original body duplicated UNIFORM; sample an actual gaussian instead,
// clamped to [0, AMPLITUDE) (center and scale are arbitrary choices)
return (int) Math.min(AMPLITUDE - 1, Math.max(0, AMPLITUDE / 2 + R.nextGaussian() * AMPLITUDE / 6));
}
},
LOG_NORMAL {
@Override
int next() {
return (int) Math.exp(R.nextDouble() * Math.log(AMPLITUDE));
}
};
String indexName() {
return name().toLowerCase(Locale.ROOT);
}
abstract int next();
}
private static double accuratePercentile(double percentile, int[] sortedValues) {
final double index = percentile / 100 * (sortedValues.length - 1);
final int intIndex = (int) index;
final double delta = index - intIndex;
if (delta == 0) {
return sortedValues[intIndex];
} else {
return sortedValues[intIndex] * (1 - delta) + sortedValues[intIndex + 1] * delta;
}
}
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 100) // to also test performance and accuracy of the reduce phase
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", CLUSTER_NAME)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
Client client = clientNode.client();
for (Distribution d : Distribution.values()) {
try {
// client.admin().indices().prepareDelete(d.indexName()).execute().actionGet();
client.admin().indices().create(createIndexRequest(d.indexName()).settings(settings)).actionGet();
} catch (Exception e) {
System.out.println("Index " + d.indexName() + " already exists, skipping index creation");
continue;
}
final int[] values = new int[NUM_DOCS];
for (int i = 0; i < NUM_DOCS; ++i) {
values[i] = d.next();
}
System.out.println("Indexing " + NUM_DOCS + " documents into " + d.indexName());
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUM_DOCS; ) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
request.add(client.prepareIndex(d.indexName(), "values", Integer.toString(i)).setSource("v", values[i]));
++i;
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
System.err.println(response.buildFailureMessage());
}
if ((i % 100000) == 0) {
System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
Arrays.sort(values);
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
for (double percentile : PERCENTILES) {
builder.field(Double.toString(percentile), accuratePercentile(percentile, values));
}
client.prepareIndex(d.indexName(), "values", "percentiles").setSource(builder.endObject()).execute().actionGet();
client.admin().indices().prepareRefresh(d.indexName()).execute().actionGet();
}
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
System.out.println("## Precision");
for (Distribution d : Distribution.values()) {
System.out.println("#### " + d);
final long count = client.prepareSearch(d.indexName()).setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
if (count != NUM_DOCS + 1) {
throw new Error("Expected " + NUM_DOCS + " documents, got " + (count - 1));
}
Map<String, Object> percentilesUnsorted = client.get(getRequest(d.indexName()).type("values").id("percentiles")).actionGet().getSourceAsMap();
SortedMap<Double, Double> percentiles = new TreeMap<>();
for (Map.Entry<String, Object> entry : percentilesUnsorted.entrySet()) {
percentiles.put(Double.parseDouble(entry.getKey()), (Double) entry.getValue());
}
System.out.println("Expected percentiles: " + percentiles);
System.out.println();
SearchResponse resp = client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
Percentiles pcts = resp.getAggregations().get("pcts");
Map<Double, Double> asMap = new LinkedHashMap<>();
double sumOfErrorSquares = 0;
for (Percentile percentile : pcts) {
asMap.put(percentile.getPercent(), percentile.getValue());
double error = percentile.getValue() - percentiles.get(percentile.getPercent());
sumOfErrorSquares += error * error;
}
System.out.println("Percentiles: " + asMap);
System.out.println("Sum of error squares: " + sumOfErrorSquares);
System.out.println();
}
System.out.println("## Performance");
for (int i = 0; i < 3; ++i) {
for (Distribution d : Distribution.values()) {
System.out.println("#### " + d);
for (int j = 0; j < QUERY_WARMUP; ++j) {
client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
}
long start = System.nanoTime();
for (int j = 0; j < QUERY_COUNT; ++j) {
client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
}
System.out.println(new TimeValue((System.nanoTime() - start) / QUERY_COUNT, TimeUnit.NANOSECONDS));
}
}
}
}


@@ -1,144 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
public class QueryFilterAggregationSearchBenchmark {
static final long COUNT = SizeValue.parseSizeValue("5m").singles();
static final int BATCH = 1000;
static final int QUERY_COUNT = 200;
static final int NUMBER_OF_TERMS = 200;
static Client client;
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 2)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("name", "node1")
.put("cluster.name", QueryFilterAggregationSearchBenchmark.class.getSimpleName())
.build();
Node node1 = new Node(settings).start();
client = node1.client();
long[] lValues = new long[NUMBER_OF_TERMS];
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
lValues[i] = ThreadLocalRandom.current().nextLong();
}
Thread.sleep(10000);
try {
client.admin().indices().create(createIndexRequest("test")).actionGet();
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
XContentBuilder builder = jsonBuilder().startObject();
builder.field("id", Integer.toString(counter));
builder.field("l_value", lValues[ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS)]);
builder.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(builder));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 100000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
if (client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits() != COUNT) {
throw new Error();
}
System.out.println("--> Number of docs in index: " + COUNT);
final long anyValue = ((Number) client.prepareSearch().execute().actionGet().getHits().hits()[0].sourceAsMap().get("l_value")).longValue();
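// Measure the plain term query first, then the same query with a filter aggregation on the same term, to isolate the aggregation overhead.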
long totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setSize(0)
.setQuery(termQuery("l_value", anyValue))
.execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Simple Query on first l_value " + totalQueryTime + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setSize(0)
.setQuery(termQuery("l_value", anyValue))
.addAggregation(AggregationBuilders.filter("filter").filter(QueryBuilders.termQuery("l_value", anyValue)))
.execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Filter agg first l_value " + totalQueryTime + "ms");
}
}

@@ -1,309 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.ObjectScatterSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
public class SubAggregationSearchCollectModeBenchmark {
static long COUNT = SizeValue.parseSizeValue("2m").singles();
static int BATCH = 1000;
static int QUERY_WARMUP = 10;
static int QUERY_COUNT = 100;
static int NUMBER_OF_TERMS = 200;
static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
static int STRING_TERM_SIZE = 5;
static Client client;
static Node[] nodes;
public static void main(String[] args) throws Exception {
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", SubAggregationSearchCollectModeBenchmark.class.getSimpleName())
.build();
nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
client = clientNode.client();
Thread.sleep(10000);
try {
client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
.startObject()
.startObject("type1")
.startObject("properties")
.startObject("s_value_dv")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("sm_value_dv")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("l_value_dv")
.field("type", "long")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("lm_value_dv")
.field("type", "long")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject())).actionGet();
long[] lValues = new long[NUMBER_OF_TERMS];
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
lValues[i] = ThreadLocalRandom.current().nextLong();
}
ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
boolean added;
do {
added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
} while (!added);
}
String[] sValues = uniqueTerms.toArray(String.class);
uniqueTerms = null;
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
XContentBuilder builder = jsonBuilder().startObject();
builder.field("id", Integer.toString(counter));
final String sValue = sValues[ThreadLocalRandom.current().nextInt(sValues.length)];
final long lValue = lValues[ThreadLocalRandom.current().nextInt(lValues.length)];
builder.field("s_value", sValue);
builder.field("l_value", lValue);
builder.field("s_value_dv", sValue);
builder.field("l_value_dv", lValue);
for (String field : new String[] {"sm_value", "sm_value_dv"}) {
builder.startArray(field);
for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
}
builder.endArray();
}
for (String field : new String[] {"lm_value", "lm_value_dv"}) {
builder.startArray(field);
for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
builder.value(lValues[ThreadLocalRandom.current().nextInt(lValues.length)]);
}
builder.endArray();
}
builder.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(builder));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
COUNT = client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
System.out.println("--> Number of docs in index: " + COUNT);
List<StatsResult> stats = new ArrayList<>();
stats.add(runTest("0000", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("0001", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("0010", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("0011", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("0100", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("0101", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("0110", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("0111", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("1000", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("1001", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("1010", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("1011", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("1100", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("1101", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
stats.add(runTest("1110", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
stats.add(runTest("1111", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
System.out.println("------------------ SUMMARY ----------------------------------------------");
System.out.format(Locale.ENGLISH, "%35s%10s%10s%15s%15s\n", "name", "took", "millis", "fieldata size", "heap used");
for (StatsResult stat : stats) {
System.out.format(Locale.ENGLISH, "%35s%10s%10d%15s%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed, stat.heapUsed);
}
System.out.println("------------------ SUMMARY ----------------------------------------------");
clientNode.close();
for (Node node : nodes) {
node.close();
}
}
public static class StatsResult {
final String name;
final long took;
final ByteSizeValue fieldDataMemoryUsed;
final ByteSizeValue heapUsed;
public StatsResult(String name, long took, ByteSizeValue fieldDataMemoryUsed, ByteSizeValue heapUsed) {
this.name = name;
this.took = took;
this.fieldDataMemoryUsed = fieldDataMemoryUsed;
this.heapUsed = heapUsed;
}
}
private static StatsResult runTest(String name, SubAggCollectionMode[] collectionModes) {
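// Runs a 4-level nested terms aggregation with the given collect modes and reports average latency, fielddata size, and heap usage.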
long totalQueryTime;
client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
System.gc();
System.out.println("--> Warmup (" + name + ")...");
// warm up the aggregation before measuring
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch("test")
.setSize(0)
.setQuery(matchAllQuery())
.addAggregation(AggregationBuilders.terms(name + "s_value").field("s_value").collectMode(collectionModes[0])
.subAggregation(AggregationBuilders.terms(name + "l_value").field("l_value").collectMode(collectionModes[1])
.subAggregation(AggregationBuilders.terms(name + "s_value_dv").field("s_value_dv").collectMode(collectionModes[2])
.subAggregation(AggregationBuilders.terms(name + "l_value_dv").field("l_value_dv").collectMode(collectionModes[3])))))
.execute().actionGet();
if (j == 0) {
System.out.println("--> Loading : took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup (" + name + ") DONE");
System.out.println("--> Running (" + name + ")...");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch("test")
.setSize(0)
.setQuery(matchAllQuery())
.addAggregation(AggregationBuilders.terms(name + "s_value").field("s_value").collectMode(collectionModes[0])
.subAggregation(AggregationBuilders.terms(name + "l_value").field("l_value").collectMode(collectionModes[1])
.subAggregation(AggregationBuilders.terms(name + "s_value_dv").field("s_value_dv").collectMode(collectionModes[2])
.subAggregation(AggregationBuilders.terms(name + "l_value_dv").field("l_value_dv").collectMode(collectionModes[3])))))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
String[] nodeIds = new String[nodes.length];
for (int i = 0; i < nodeIds.length; i++) {
nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
}
ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
ByteSizeValue heapUsed = clusterStatsResponse.getNodesStats().getJvm().getHeapUsed();
System.out.println("--> Heap used: " + heapUsed);
ByteSizeValue fieldDataMemoryUsed = clusterStatsResponse.getIndicesStats().getFieldData().getMemorySize();
System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed, heapUsed);
}
}

@@ -1,350 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.ObjectScatterSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.node.Node;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.benchmark.search.aggregations.TermsAggregationSearchBenchmark.Method;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
public class TermsAggregationSearchAndIndexingBenchmark {
static String indexName = "test";
static String typeName = "type1";
static Random random = new Random();
static long COUNT = SizeValue.parseSizeValue("2m").singles();
static int BATCH = 1000;
static int NUMBER_OF_TERMS = (int) SizeValue.parseSizeValue("100k").singles();
static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
static int STRING_TERM_SIZE = 5;
static Node[] nodes;
public static void main(String[] args) throws Exception {
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put("refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName())
.build();
nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Client client = nodes[0].client();
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
try {
client.admin().indices().prepareCreate(indexName)
.addMapping(typeName, generateMapping("eager", "lazy"))
.get();
Thread.sleep(5000);
long startTime = System.currentTimeMillis();
ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
boolean added;
do {
added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
} while (!added);
}
String[] sValues = uniqueTerms.toArray(String.class);
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
XContentBuilder builder = jsonBuilder().startObject();
builder.field("id", Integer.toString(counter));
final String sValue = sValues[counter % sValues.length];
builder.field("s_value", sValue);
builder.field("s_value_dv", sValue);
for (String field : new String[] {"sm_value", "sm_value_dv"}) {
builder.startArray(field);
for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
}
builder.endArray();
}
request.add(Requests.indexRequest(indexName).type("type1").id(Integer.toString(counter))
.source(builder));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH));
}
}
System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
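// Reset both fields to lazy loading; each test run below applies its own loading settings.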
client.admin().indices().preparePutMapping(indexName)
.setType(typeName)
.setSource(generateMapping("lazy", "lazy"))
.get();
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
String[] nodeIds = new String[nodes.length];
for (int i = 0; i < nodeIds.length; i++) {
nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
}
List<TestRun> testRuns = new ArrayList<>();
testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
testRuns.add(new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
testRuns.add(new TestRun("Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
testRuns.add(new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));
List<TestResult> testResults = new ArrayList<>();
for (TestRun testRun : testRuns) {
client.admin().indices().preparePutMapping(indexName).setType(typeName)
.setSource(generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading)).get();
client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
SearchThread searchThread = new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
RefreshThread refreshThread = new RefreshThread(client);
System.out.println("--> Running '" + testRun.name + "' round...");
new Thread(refreshThread).start();
new Thread(searchThread).start();
Thread.sleep(2 * 60 * 1000);
refreshThread.stop();
searchThread.stop();
System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");
ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
System.out.println("--> Heap used: " + clusterStatsResponse.getNodesStats().getJvm().getHeapUsed());
ByteSizeValue fieldDataMemoryUsed = clusterStatsResponse.getIndicesStats().getFieldData().getMemorySize();
System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
testResults.add(new TestResult(testRun.name, refreshThread.avgRefreshTime, searchThread.avgQueryTime, fieldDataMemoryUsed));
}
System.out.println("----------------------------------------- SUMMARY ----------------------------------------------");
System.out.format(Locale.ENGLISH, "%30s%18s%15s%15s\n", "name", "avg refresh time", "avg query time", "fieldata size");
for (TestResult testResult : testResults) {
System.out.format(Locale.ENGLISH, "%30s%18s%15s%15s\n", testResult.name, testResult.avgRefreshTime, testResult.avgQueryTime, testResult.fieldDataSizeInMemory);
}
System.out.println("----------------------------------------- SUMMARY ----------------------------------------------");
client.close();
for (Node node : nodes) {
node.close();
}
}
static class RefreshThread implements Runnable {
private final Client client;
private volatile boolean run = true;
private volatile boolean stopped = false;
private volatile long avgRefreshTime = 0;
RefreshThread(Client client) throws IOException {
this.client = client;
}
@Override
public void run() {
long totalRefreshTime = 0;
int numExecutedRefreshed = 0;
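// Repeatedly re-index a few existing docs, then refresh and record the refresh latency, pausing 500ms between cycles.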
while (run) {
long docIdLimit = COUNT;
for (long docId = 1; run && docId < docIdLimit;) {
try {
for (int j = 0; j < 8; j++) {
GetResponse getResponse = client
.prepareGet(indexName, "type1", String.valueOf(++docId))
.get();
client.prepareIndex(indexName, "type1", getResponse.getId())
.setSource(getResponse.getSource())
.get();
}
long startTime = System.currentTimeMillis();
client.admin().indices().prepareRefresh(indexName).execute().actionGet();
totalRefreshTime += System.currentTimeMillis() - startTime;
numExecutedRefreshed++;
Thread.sleep(500);
} catch (Throwable e) {
e.printStackTrace();
}
}
}
avgRefreshTime = numExecutedRefreshed == 0 ? 0 : totalRefreshTime / numExecutedRefreshed; // avoid division by zero if stopped before the first refresh completed
stopped = true;
}
public void stop() throws InterruptedException {
run = false;
while (!stopped) {
Thread.sleep(100);
}
}
}
private static class TestRun {
final String name;
final String indexedFieldEagerLoading;
final String docValuesEagerLoading;
final String termsAggsField;
final String termsAggsExecutionHint;
private TestRun(String name, String indexedFieldEagerLoading, String docValuesEagerLoading, String termsAggsField, String termsAggsExecutionHint) {
this.name = name;
this.indexedFieldEagerLoading = indexedFieldEagerLoading;
this.docValuesEagerLoading = docValuesEagerLoading;
this.termsAggsField = termsAggsField;
this.termsAggsExecutionHint = termsAggsExecutionHint;
}
}
private static class TestResult {
final String name;
final TimeValue avgRefreshTime;
final TimeValue avgQueryTime;
final ByteSizeValue fieldDataSizeInMemory;
private TestResult(String name, long avgRefreshTime, long avgQueryTime, ByteSizeValue fieldDataSizeInMemory) {
this.name = name;
this.avgRefreshTime = TimeValue.timeValueMillis(avgRefreshTime);
this.avgQueryTime = TimeValue.timeValueMillis(avgQueryTime);
this.fieldDataSizeInMemory = fieldDataSizeInMemory;
}
}
static class SearchThread implements Runnable {
private final Client client;
private final String field;
private final String executionHint;
private volatile boolean run = true;
private volatile boolean stopped = false;
private volatile long avgQueryTime = 0;
SearchThread(Client client, String field, String executionHint) {
this.client = client;
this.field = field;
this.executionHint = executionHint;
}
@Override
public void run() {
long totalQueryTime = 0;
int numExecutedQueries = 0;
while (run) {
try {
SearchResponse searchResponse = Method.AGGREGATION.addTermsAgg(client.prepareSearch()
.setSize(0)
.setQuery(matchAllQuery()), "test", field, executionHint)
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
numExecutedQueries++;
} catch (Throwable e) {
e.printStackTrace();
}
}
avgQueryTime = numExecutedQueries == 0 ? 0 : totalQueryTime / numExecutedQueries; // avoid division by zero if stopped before any query finished
stopped = true;
}
public void stop() throws InterruptedException {
run = false;
while (!stopped) {
Thread.sleep(100);
}
}
}
private static XContentBuilder generateMapping(String loading1, String loading2) throws IOException {
return jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("s_value")
.field("type", "string")
.field("index", "not_analyzed")
.startObject("fielddata")
.field("loading", loading1)
.endObject()
.endObject()
.startObject("s_value_dv")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("loading", loading2)
.field("format", "doc_values")
.endObject()
.endObject()
.endObject().endObject().endObject();
}
}

@@ -1,395 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.ObjectScatterSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
public class TermsAggregationSearchBenchmark {
static long COUNT = SizeValue.parseSizeValue("2m").singles();
static int BATCH = 1000;
static int QUERY_WARMUP = 10;
static int QUERY_COUNT = 100;
static int NUMBER_OF_TERMS = 200;
static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
static int STRING_TERM_SIZE = 5;
static Client client;
static Node[] nodes;
public enum Method {
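// AGGREGATION collects sub-aggregations with the default depth-first mode; AGGREGATION_DEFERRED uses BREADTH_FIRST, deferring sub-aggregation collection to the top buckets.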
AGGREGATION {
@Override
SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field));
}
@Override
SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
}
},
AGGREGATION_DEFERRED {
@Override
SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field).collectMode(SubAggCollectionMode.BREADTH_FIRST));
}
@Override
SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).collectMode(SubAggCollectionMode.BREADTH_FIRST).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
}
};
abstract SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint);
abstract SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField);
}
public static void main(String[] args) throws Exception {
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", TermsAggregationSearchBenchmark.class.getSimpleName())
.build();
nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).put("path.home", ".").build()).start();
}
Node clientNode = new Node(settingsBuilder().put(settings).put("name", "client").put("node.client", true).build()).start();
client = clientNode.client();
Thread.sleep(10000);
try {
client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
.startObject()
.startObject("type1")
.startObject("properties")
.startObject("s_value_dv")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("sm_value_dv")
.field("type", "string")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("l_value_dv")
.field("type", "long")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.startObject("lm_value_dv")
.field("type", "long")
.field("index", "no")
.startObject("fielddata")
.field("format", "doc_values")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject())).actionGet();
ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
for (int i = 0; i < NUMBER_OF_TERMS; i++) {
boolean added;
do {
added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
} while (!added);
}
String[] sValues = uniqueTerms.toArray(String.class);
uniqueTerms = null;
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
XContentBuilder builder = jsonBuilder().startObject();
builder.field("id", Integer.toString(counter));
final String sValue = sValues[ThreadLocalRandom.current().nextInt(sValues.length)];
final long lValue = ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS);
builder.field("s_value", sValue);
builder.field("l_value", lValue);
builder.field("s_value_dv", sValue);
builder.field("l_value_dv", lValue);
for (String field : new String[] {"sm_value", "sm_value_dv"}) {
builder.startArray(field);
for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
}
builder.endArray();
}
for (String field : new String[] {"lm_value", "lm_value_dv"}) {
builder.startArray(field);
for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
builder.value(ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS));
}
builder.endArray();
}
builder.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(builder));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
COUNT = client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
System.out.println("--> Number of docs in index: " + COUNT);
List<StatsResult> stats = new ArrayList<>();
stats.add(terms("terms_agg_s", Method.AGGREGATION, "s_value", null));
stats.add(terms("terms_agg_s_dv", Method.AGGREGATION, "s_value_dv", null));
stats.add(terms("terms_agg_map_s", Method.AGGREGATION, "s_value", "map"));
stats.add(terms("terms_agg_map_s_dv", Method.AGGREGATION, "s_value_dv", "map"));
stats.add(terms("terms_agg_def_s", Method.AGGREGATION_DEFERRED, "s_value", null));
stats.add(terms("terms_agg_def_s_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", null));
stats.add(terms("terms_agg_def_map_s", Method.AGGREGATION_DEFERRED, "s_value", "map"));
stats.add(terms("terms_agg_def_map_s_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "map"));
stats.add(terms("terms_agg_l", Method.AGGREGATION, "l_value", null));
stats.add(terms("terms_agg_l_dv", Method.AGGREGATION, "l_value_dv", null));
stats.add(terms("terms_agg_def_l", Method.AGGREGATION_DEFERRED, "l_value", null));
stats.add(terms("terms_agg_def_l_dv", Method.AGGREGATION_DEFERRED, "l_value_dv", null));
stats.add(terms("terms_agg_sm", Method.AGGREGATION, "sm_value", null));
stats.add(terms("terms_agg_sm_dv", Method.AGGREGATION, "sm_value_dv", null));
stats.add(terms("terms_agg_map_sm", Method.AGGREGATION, "sm_value", "map"));
stats.add(terms("terms_agg_map_sm_dv", Method.AGGREGATION, "sm_value_dv", "map"));
stats.add(terms("terms_agg_def_sm", Method.AGGREGATION_DEFERRED, "sm_value", null));
stats.add(terms("terms_agg_def_sm_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", null));
stats.add(terms("terms_agg_def_map_sm", Method.AGGREGATION_DEFERRED, "sm_value", "map"));
stats.add(terms("terms_agg_def_map_sm_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "map"));
stats.add(terms("terms_agg_lm", Method.AGGREGATION, "lm_value", null));
stats.add(terms("terms_agg_lm_dv", Method.AGGREGATION, "lm_value_dv", null));
stats.add(terms("terms_agg_def_lm", Method.AGGREGATION_DEFERRED, "lm_value", null));
stats.add(terms("terms_agg_def_lm_dv", Method.AGGREGATION_DEFERRED, "lm_value_dv", null));
stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_s_l", Method.AGGREGATION_DEFERRED, "s_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_def_s_l_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_s_lm", Method.AGGREGATION_DEFERRED, "s_value", "lm_value", null));
stats.add(termsStats("terms_stats_agg_def_s_lm_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "lm_value_dv", null));
stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_sm_l", Method.AGGREGATION_DEFERRED, "sm_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_def_sm_l_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_s_l", Method.AGGREGATION_DEFERRED, "s_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_def_s_l_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_s_lm", Method.AGGREGATION_DEFERRED, "s_value", "lm_value", null));
stats.add(termsStats("terms_stats_agg_def_s_lm_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "lm_value_dv", null));
stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
stats.add(termsStats("terms_stats_agg_def_sm_l", Method.AGGREGATION_DEFERRED, "sm_value", "l_value", null));
stats.add(termsStats("terms_stats_agg_def_sm_l_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "l_value_dv", null));
System.out.println("------------------ SUMMARY ----------------------------------------------");
System.out.format(Locale.ENGLISH, "%35s%10s%10s%15s\n", "name", "took", "millis", "fieldata size");
for (StatsResult stat : stats) {
System.out.format(Locale.ENGLISH, "%35s%10s%10d%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed);
}
System.out.println("------------------ SUMMARY ----------------------------------------------");
clientNode.close();
for (Node node : nodes) {
node.close();
}
}
public static class StatsResult {
final String name;
final long took;
final ByteSizeValue fieldDataMemoryUsed;
public StatsResult(String name, long took, ByteSizeValue fieldDataMemoryUsed) {
this.name = name;
this.took = took;
this.fieldDataMemoryUsed = fieldDataMemoryUsed;
}
}
private static StatsResult terms(String name, Method method, String field, String executionHint) {
long totalQueryTime;
client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
System.gc();
System.out.println("--> Warmup (" + name + ")...");
// warm up the aggregation before measuring
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch("test")
.setSize(0)
.setQuery(matchAllQuery()), name, field, executionHint)
.execute().actionGet();
if (j == 0) {
System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup (" + name + ") DONE");
System.out.println("--> Running (" + name + ")...");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
.setSize(0)
.setQuery(matchAllQuery()), name, field, executionHint)
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
String[] nodeIds = new String[nodes.length];
for (int i = 0; i < nodeIds.length; i++) {
nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
}
ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
System.out.println("--> Heap used: " + clusterStatsResponse.getNodesStats().getJvm().getHeapUsed());
ByteSizeValue fieldDataMemoryUsed = clusterStatsResponse.getIndicesStats().getFieldData().getMemorySize();
System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed);
}
private static StatsResult termsStats(String name, Method method, String keyField, String valueField, String executionHint) {
long totalQueryTime;
client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
System.gc();
System.out.println("--> Warmup (" + name + ")...");
// warm up the aggregation before measuring
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
.setSize(0)
.setQuery(matchAllQuery()), name, keyField, valueField)
.execute().actionGet();
if (j == 0) {
System.out.println("--> Loading (" + name + "): took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
}
System.out.println("--> Warmup (" + name + ") DONE");
System.out.println("--> Running (" + name + ")...");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
.setSize(0)
.setQuery(matchAllQuery()), name, keyField, valueField)
.execute().actionGet();
if (searchResponse.getHits().totalHits() != COUNT) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Terms stats agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
return new StatsResult(name, totalQueryTime, ByteSizeValue.parseBytesSizeValue("0b", "StatsResult"));
}
}

@@ -1,259 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.aggregations;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.node.Node;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
public class TimeDataHistogramAggregationBenchmark {
static long COUNT = SizeValue.parseSizeValue("5m").singles();
static long TIME_PERIOD = 24 * 3600 * 1000;
static int BATCH = 100;
static int QUERY_WARMUP = 50;
static int QUERY_COUNT = 500;
static IndexFieldData.CommonSettings.MemoryStorageFormat MEMORY_FORMAT = IndexFieldData.CommonSettings.MemoryStorageFormat.PAGED;
static double ACCEPTABLE_OVERHEAD_RATIO = 0.5;
static float MATCH_PERCENTAGE = 0.1f;
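// MEMORY_FORMAT and ACCEPTABLE_OVERHEAD_RATIO are the fielddata settings under test; MATCH_PERCENTAGE controls what fraction of docs each query matches.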
static Client client;
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put("node.local", true)
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", TimeDataHistogramAggregationBenchmark.class.getSimpleName())
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
client = nodes[0].client();
Thread.sleep(10000);
try {
client.admin().indices().create(createIndexRequest("test")).actionGet();
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
long[] currentTimeInMillis1 = new long[]{System.currentTimeMillis()};
long[] currentTimeInMillis2 = new long[]{System.currentTimeMillis()};
long startTimeInMillis = currentTimeInMillis1[0];
long averageMillisChange = TIME_PERIOD / COUNT * 2;
long backwardSkew = Math.max(1, (long) (averageMillisChange * 0.1));
long bigOutOfOrder = 1;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
XContentBuilder builder = jsonBuilder().startObject();
builder.field("id", Integer.toString(counter));
// move forward in time and sometimes a little bit back (delayed delivery)
long diff = ThreadLocalRandom.current().nextLong(2 * averageMillisChange + 2 * backwardSkew) - backwardSkew;
long[] currentTime = counter % 2 == 0 ? currentTimeInMillis1 : currentTimeInMillis2;
currentTime[0] += diff;
if (ThreadLocalRandom.current().nextLong(100) <= bigOutOfOrder) {
builder.field("l_value", currentTime[0] - 60000); // 1m delays
} else {
builder.field("l_value", currentTime[0]);
}
builder.endObject();
request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
.source(builder));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
System.out.println("Time range 1: " + (currentTimeInMillis1[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
System.out.println("Time range 2: " + (currentTimeInMillis2[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
System.out.println("--> optimizing index");
client.admin().indices().prepareForceMerge().setMaxNumSegments(1).get();
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
COUNT = client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
System.out.println("--> Number of docs in index: " + COUNT);
// load with the reverse options to make sure jit doesn't optimize one away
setMapping(ACCEPTABLE_OVERHEAD_RATIO, MEMORY_FORMAT.equals(IndexFieldData.CommonSettings.MemoryStorageFormat.PACKED) ? IndexFieldData.CommonSettings.MemoryStorageFormat.PAGED : IndexFieldData.CommonSettings.MemoryStorageFormat.PACKED);
warmUp("hist_l", "l_value", MATCH_PERCENTAGE);
setMapping(ACCEPTABLE_OVERHEAD_RATIO, MEMORY_FORMAT);
warmUp("hist_l", "l_value", MATCH_PERCENTAGE);
List<StatsResult> stats = new ArrayList<>();
stats.add(measureAgg("hist_l", "l_value", MATCH_PERCENTAGE));
NodesStatsResponse nodeStats = client.admin().cluster().prepareNodesStats(nodes[0].settings().get("name")).clear()
.setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.FieldData)).get();
System.out.println("------------------ SUMMARY -------------------------------");
System.out.println("docs: " + COUNT);
System.out.println("match percentage: " + MATCH_PERCENTAGE);
System.out.println("memory format hint: " + MEMORY_FORMAT);
System.out.println("acceptable_overhead_ratio: " + ACCEPTABLE_OVERHEAD_RATIO);
System.out.println("field data: " + nodeStats.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.format(Locale.ROOT, "%25s%10s%10s\n", "name", "took", "millis");
for (StatsResult stat : stats) {
System.out.format(Locale.ROOT, "%25s%10s%10d\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT));
}
System.out.println("------------------ SUMMARY -------------------------------");
for (Node node : nodes) {
node.close();
}
}
protected static void setMapping(double acceptableOverheadRatio, IndexFieldData.CommonSettings.MemoryStorageFormat fielddataStorageFormat) throws IOException {
XContentBuilder mapping = JsonXContent.contentBuilder();
mapping.startObject().startObject("type1").startObject("properties").startObject("l_value")
.field("type", "long")
.startObject("fielddata")
.field("acceptable_transient_overhead_ratio", acceptableOverheadRatio)
.field("acceptable_overhead_ratio", acceptableOverheadRatio)
.field(IndexFieldData.CommonSettings.SETTING_MEMORY_STORAGE_HINT, fielddataStorageFormat.name().toLowerCase(Locale.ROOT))
.endObject()
.endObject().endObject().endObject().endObject();
client.admin().indices().preparePutMapping("test").setType("type1").setSource(mapping).get();
}
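// Simple name/total-took-millis holder for the summary table printed in main().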
static class StatsResult {
final String name;
final long took;
StatsResult(String name, long took) {
this.name = name;
this.took = took;
}
}
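// Despite the "Terms" in the name, this runs a histogram aggregation on the given field over a random
// ~matchPercentage subset of docs selected by an inline script query.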
private static SearchResponse doTermsAggsSearch(String name, String field, float matchPercentage) {
Map<String, Object> params = new HashMap<>();
params.put("matchP", matchPercentage);
SearchResponse response = client.prepareSearch()
.setSize(0)
.setQuery(
QueryBuilders.constantScoreQuery(QueryBuilders.scriptQuery(new Script("random()<matchP", ScriptType.INLINE, null,
params))))
.addAggregation(AggregationBuilders.histogram(name).field(field).interval(3600 * 1000)).get();
if (response.getHits().totalHits() < COUNT * matchPercentage * 0.7) {
System.err.println("--> warning - big deviation from expected count: " + response.getHits().totalHits() + " expected: " + COUNT * matchPercentage);
}
return response;
}
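// Runs the aggregation QUERY_COUNT times, summing the reported took times; warns if the hit count changes between runs.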
private static StatsResult measureAgg(String name, String field, float matchPercentage) {
long totalQueryTime; // accumulated took time across QUERY_COUNT queries
System.out.println("--> Running (" + name + ")...");
totalQueryTime = 0;
long previousCount = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = doTermsAggsSearch(name, field, matchPercentage);
if (previousCount == 0) {
previousCount = searchResponse.getHits().getTotalHits();
} else if (searchResponse.getHits().totalHits() != previousCount) {
System.err.println("*** HIT COUNT CHANGE -> CACHE EXPIRED? ***");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Histogram aggregations (" + field + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
return new StatsResult(name, totalQueryTime);
}
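// Clears the fielddata cache, then runs QUERY_WARMUP searches so fielddata is reloaded before measurements start.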
private static void warmUp(String name, String field, float matchPercentage) {
System.out.println("--> Warmup (" + name + ")...");
client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
// warm up fielddata and the histogram aggregation before measuring
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = doTermsAggsSearch(name, field, matchPercentage);
if (j == 0) {
System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
}
}
System.out.println("--> Warmup (" + name + ") DONE");
}
}

View File

@ -1,213 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.child;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.node.Node;
import java.util.Arrays;
import java.util.Random;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
/**
 * Benchmarks has_child query latency while child documents are concurrently re-indexed.
 */
public class ChildSearchAndIndexingBenchmark {
static int PARENT_COUNT = (int) SizeValue.parseSizeValue("1m").singles();
static int NUM_CHILDREN_PER_PARENT = 12;
static int QUERY_VALUE_RATIO_PER_PARENT = 3;
static int QUERY_COUNT = 50;
static String indexName = "test";
static Random random = new Random();
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", ChildSearchAndIndexingBenchmark.class.getSimpleName())
.build();
Node node1 = new Node(settingsBuilder().put(settings).put("name", "node1").build()).start();
Client client = node1.client();
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
try {
client.admin().indices().create(createIndexRequest(indexName)).actionGet();
client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
Thread.sleep(5000);
long startTime = System.currentTimeMillis();
ParentChildIndexGenerator generator = new ParentChildIndexGenerator(client, PARENT_COUNT, NUM_CHILDREN_PER_PARENT, QUERY_VALUE_RATIO_PER_PARENT);
generator.index();
System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
SearchThread searchThread = new SearchThread(client);
new Thread(searchThread).start();
IndexThread indexThread = new IndexThread(client);
new Thread(indexThread).start();
System.in.read();
indexThread.stop();
searchThread.stop();
client.close();
node1.close();
}
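// Loops over existing child docs, re-indexing copies of them; repeated passes overwrite the same ids,
// creating update and delete load while searches run.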
static class IndexThread implements Runnable {
private final Client client;
private volatile boolean run = true;
IndexThread(Client client) {
this.client = client;
}
@Override
public void run() {
while (run) {
int childIdLimit = PARENT_COUNT * NUM_CHILDREN_PER_PARENT;
for (int childId = 1; run && childId < childIdLimit;) {
try {
for (int j = 0; j < 8; j++) {
GetResponse getResponse = client
.prepareGet(indexName, "child", String.valueOf(++childId))
.setFields("_source", "_parent")
.setRouting("1") // Doesn't matter what value, since there is only one shard
.get();
client.prepareIndex(indexName, "child", Integer.toString(childId) + "_" + j)
.setParent(getResponse.getField("_parent").getValue().toString())
.setSource(getResponse.getSource())
.get();
}
client.admin().indices().prepareRefresh(indexName).execute().actionGet();
Thread.sleep(1000);
if (childId % 500 == 0) {
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.clear().setIndices(true).execute().actionGet();
System.out.println("Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
}
} catch (Throwable e) {
e.printStackTrace();
}
}
}
}
public void stop() {
run = false;
}
}
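// Repeatedly measures has_child (term filter and match_all) query latency and prints JVM heap usage between rounds.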
static class SearchThread implements Runnable {
private final Client client;
private final int numValues;
private volatile boolean run = true;
SearchThread(Client client) {
this.client = client;
this.numValues = NUM_CHILDREN_PER_PARENT / QUERY_VALUE_RATIO_PER_PARENT; // match the value range produced by ParentChildIndexGenerator
}
@Override
public void run() {
while (run) {
try {
long totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasChildQuery("child", termQuery("field2", "value" + random.nextInt(numValues)))
)
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter with term filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasChildQuery("child", matchAllQuery()))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
Thread.sleep(1000);
} catch (Throwable e) {
e.printStackTrace();
}
}
}
public void stop() {
run = false;
}
}
}

View File

@ -1,347 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.child;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.children.Children;
import java.util.Arrays;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
/**
 * Benchmarks has_child/has_parent filters and queries and the children aggregation over a generated parent/child index.
 */
public class ChildSearchBenchmark {
/*
Run: MAVEN_OPTS=-Xmx4g mvn test-compile exec:java -Dexec.mainClass="org.elasticsearch.benchmark.search.child.ChildSearchBenchmark" -Dexec.classpathScope="test" -Dexec.args="--bwc_mode false"
*/
public static void main(String[] args) throws Exception {
boolean bwcMode = false;
int numParents = (int) SizeValue.parseSizeValue("2m").singles();
if (args.length % 2 != 0) {
throw new IllegalArgumentException("Expected an even number of arguments (flag and value pairs)");
}
for (int i = 0; i < args.length; i += 2) {
String value = args[i + 1];
if ("--bwc_mode".equals(args[i])) {
bwcMode = Boolean.valueOf(value);
} else if ("--num_parents".equals(args[i])) {
numParents = Integer.valueOf(value);
}
}
Settings.Builder settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", ChildSearchBenchmark.class.getSimpleName());
// enable bwc parent child mode:
if (bwcMode) {
settings.put("tests.mock.version", Version.V_1_6_0);
}
Node node1 = new Node(settingsBuilder().put(settings).put("name", "node1").build()).start();
Client client = node1.client();
int CHILD_COUNT = 15;
int QUERY_VALUE_RATIO = 3;
int QUERY_WARMUP = 10;
int QUERY_COUNT = 20;
String indexName = "test";
ParentChildIndexGenerator parentChildIndexGenerator = new ParentChildIndexGenerator(client, numParents, CHILD_COUNT, QUERY_VALUE_RATIO);
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
try {
client.admin().indices().create(createIndexRequest(indexName)).actionGet();
client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
Thread.sleep(5000);
long startTime = System.currentTimeMillis();
parentChildIndexGenerator.index();
System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareSearch(indexName).setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
System.out.println("--> Running just child query");
// run just the child query, warm up first
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
}
long totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
// run parent child constant query
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_child filter with match_all child query");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasChildQuery("child", matchAllQuery()))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running children agg");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(matchQuery("field1", parentChildIndexGenerator.getQueryValue()))
.addAggregation(
AggregationBuilders.children("to-child").childType("child")
)
.execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
Children children = searchResponse.getAggregations().get("to-child");
if (j % 10 == 0) {
System.out.println("--> children doc count [" + j + "], got [" + children.getDocCount() + "]");
}
}
System.out.println("--> children agg, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running children agg with match_all");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.addAggregation(
AggregationBuilders.children("to-child").childType("child")
)
.execute().actionGet();
totalQueryTime += searchResponse.getTookInMillis();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
Children children = searchResponse.getAggregations().get("to-child");
if (j % 10 == 0) {
System.out.println("--> children doc count [" + j + "], got [" + children.getDocCount() + "]");
}
}
System.out.println("--> children agg, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
// run parent child constant query
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
}
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_parent filter with match_all parent query ");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
boolQuery()
.must(matchAllQuery())
.filter(hasParentQuery("parent", matchAllQuery()))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent filter with match_all parent query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
System.out.println("--> Running has_child query with score type");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreMode(ScoreMode.Max)).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreMode(ScoreMode.Max)).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", matchAllQuery()).scoreMode(ScoreMode.Max)).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.out.println("--> Running has_parent query with score type");
// run parent child score query
for (int j = 0; j < QUERY_WARMUP; j++) {
client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).score(true)).execute().actionGet();
}
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) { // run the full QUERY_COUNT so the average below divides correctly
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).score(true)).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
totalQueryTime = 0;
for (int j = 1; j <= QUERY_COUNT; j++) { // run the full QUERY_COUNT so the average below divides correctly
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", matchAllQuery()).score(true)).execute().actionGet();
if (j % 10 == 0) {
System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_parent query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
System.gc();
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
client.close();
node1.close();
}
}

View File

@ -1,207 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.child;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import java.io.IOException;
import java.util.Arrays;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
/**
 * Benchmarks has_child queries whose number of matching children grows in powers of two.
 */
public class ChildSearchShortCircuitBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", ChildSearchShortCircuitBenchmark.class.getSimpleName())
.build();
Node node1 = new Node(settingsBuilder().put(settings).put("name", "node1").build()).start();
Client client = node1.client();
long PARENT_COUNT = SizeValue.parseSizeValue("10M").singles();
int BATCH = 100;
int QUERY_WARMUP = 5;
int QUERY_COUNT = 25;
String indexName = "test";
client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
try {
client.admin().indices().create(createIndexRequest(indexName)).actionGet();
client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
.startObject("_parent").field("type", "parent").endObject()
.endObject().endObject()).execute().actionGet();
Thread.sleep(5000);
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + PARENT_COUNT + "] parent document and some child documents");
long ITERS = PARENT_COUNT / BATCH;
int i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
.source(parentSource(counter)));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("--> Indexed " + (i * BATCH) + "parent docs; took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
int id = 0;
for (i = 1; i <= PARENT_COUNT; i *= 2) {
int parentId = 1;
for (int j = 0; j < i; j++) {
client.prepareIndex(indexName, "child", Integer.toString(id++))
.setParent(Integer.toString(parentId++))
.setSource(childSource(i))
.execute().actionGet();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime());
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareSearch(indexName).setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
System.out.println("--> Running just child query");
// run just the child query, warm up first
for (int i = 1; i <= 10000; i *= 2) {
SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(matchQuery("child.field2", i)).execute().actionGet();
System.out.println("--> Warmup took["+ i +"]: " + searchResponse.getTook());
if (searchResponse.getHits().totalHits() != i) {
System.err.println("--> mismatch on hits");
}
}
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
// run parent child constant query
for (int j = 1; j < QUERY_WARMUP; j *= 2) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
hasChildQuery("child", matchQuery("field2", j))
)
.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
if (searchResponse.getHits().totalHits() != j) {
System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + PARENT_COUNT + "]");
}
}
long totalQueryTime;
for (int i = 1; i < PARENT_COUNT; i *= 2) {
totalQueryTime = 0; // reset per step so each printed average covers only this step's QUERY_COUNT queries
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(boolQuery().must(matchAllQuery()).filter(hasChildQuery("child", matchQuery("field2", i))))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != i) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child filter " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
}
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
for (int i = 1; i < PARENT_COUNT; i *= 2) {
totalQueryTime = 0; // reset per step, as above
for (int j = 0; j < QUERY_COUNT; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(hasChildQuery("child", matchQuery("field2", i)).scoreMode(ScoreMode.Max))
.execute().actionGet();
if (searchResponse.getHits().totalHits() != i) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> has_child query " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
}
System.gc();
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
client.close();
node1.close();
}
private static XContentBuilder parentSource(int val) throws IOException {
return jsonBuilder().startObject().field("field1", Integer.toString(val)).endObject();
}
private static XContentBuilder childSource(int val) throws IOException {
return jsonBuilder().startObject().field("field2", Integer.toString(val)).endObject();
}
}

View File

@ -1,120 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.child;
import com.carrotsearch.hppc.ObjectArrayList;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import java.util.Random;
/**
 * Generates and bulk indexes a randomized parent/child corpus used by the child search benchmarks.
 */
public class ParentChildIndexGenerator {
private final static Random RANDOM = new Random();
private final Client client;
private final int numParents;
private final int numChildrenPerParent;
private final int queryValueRatio;
public ParentChildIndexGenerator(Client client, int numParents, int numChildrenPerParent, int queryValueRatio) {
this.client = client;
this.numParents = numParents;
this.numChildrenPerParent = numChildrenPerParent;
this.queryValueRatio = queryValueRatio;
}
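// Pre-generates all parent ids, then indexes parents and their children in randomly interleaved bulk requests of up to 100 actions.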
public void index() {
// Memory intensive...
ObjectHashSet<String> usedParentIds = new ObjectHashSet<>(numParents, 0.5d);
ObjectArrayList<ParentDocument> parents = new ObjectArrayList<>(numParents);
for (int i = 0; i < numParents; i++) {
String parentId;
do {
parentId = RandomStrings.randomAsciiOfLength(RANDOM, 10);
} while (!usedParentIds.add(parentId));
String[] queryValues = new String[numChildrenPerParent];
for (int j = 0; j < numChildrenPerParent; j++) {
queryValues[j] = getQueryValue();
}
parents.add(new ParentDocument(parentId, queryValues));
}
int indexCounter = 0;
int childIdCounter = 0;
while (!parents.isEmpty()) {
BulkRequestBuilder request = client.prepareBulk();
for (int i = 0; !parents.isEmpty() && i < 100; i++) {
int index = RANDOM.nextInt(parents.size());
ParentDocument parentDocument = parents.get(index);
if (parentDocument.indexCounter == -1) {
request.add(Requests.indexRequest("test").type("parent")
.id(parentDocument.parentId)
.source("field1", getQueryValue()));
} else {
request.add(Requests.indexRequest("test").type("child")
.parent(parentDocument.parentId)
.id(String.valueOf(++childIdCounter))
.source("field2", parentDocument.queryValues[parentDocument.indexCounter]));
}
if (++parentDocument.indexCounter == parentDocument.queryValues.length) {
parents.remove(index);
}
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
indexCounter += response.getItems().length;
if (indexCounter % 100000 == 0) {
System.out.println("--> Indexed " + indexCounter + " documents");
}
}
}
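// Returns one of a bounded set of shared values; queryValueRatio controls how many children end up sharing each value.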
public String getQueryValue() {
return "value" + RANDOM.nextInt(numChildrenPerParent / queryValueRatio);
}
class ParentDocument {
final String parentId;
final String[] queryValues;
int indexCounter;
ParentDocument(String parentId, String[] queryValues) {
this.parentId = parentId;
this.queryValues = queryValues;
this.indexCounter = -1;
}
}
}

View File

@ -1,207 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.geo;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
 * Benchmarks geo_distance filters across distance algorithms (ARC, SLOPPY_ARC, PLANE) and optimize_bbox modes.
 */
public class GeoDistanceSearchBenchmark {
public static void main(String[] args) throws Exception {
Settings settings = Settings.builder().put("cluste.name", GeoDistanceSearchBenchmark.class.getSimpleName()).build();
Node node = new Node(settings).start();
Client client = node.client();
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("Failed to wait for green status, bailing");
exit(1);
}
final long NUM_DOCS = SizeValue.parseSizeValue("1m").singles();
final long NUM_WARM = 50;
final long NUM_RUNS = 100;
if (client.admin().indices().prepareExists("test").execute().actionGet().isExists()) {
System.out.println("Found an index, count: " + client.prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits());
} else {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
.endObject().endObject().string();
client.admin().indices().prepareCreate("test")
.setSettings(Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
.addMapping("type1", mapping)
.execute().actionGet();
System.err.println("--> Indexing [" + NUM_DOCS + "]");
for (long i = 0; i < NUM_DOCS; ) {
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "New York")
.startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
.endObject()).execute().actionGet();
// to NY: 5.286 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Times Square")
.startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
.endObject()).execute().actionGet();
// to NY: 0.4621 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Tribeca")
.startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
.endObject()).execute().actionGet();
// to NY: 1.258 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Soho")
.startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
.endObject()).execute().actionGet();
// to NY: 8.572 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Brooklyn")
.startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
.endObject()).execute().actionGet();
if ((i % 10000) == 0) {
System.err.println("--> indexed " + i);
}
}
System.err.println("Done indexed");
client.admin().indices().prepareFlush("test").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
}
System.err.println("--> Warming up (ARC) - optimize_bbox");
long start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "memory");
}
long totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - optimize_bbox (memory) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - optimize_bbox (memory)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (ARC) - optimize_bbox (indexed)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "indexed");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - optimize_bbox (indexed)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "indexed");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (ARC) - no optimize_bbox");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "none");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - no optimize_bbox " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - no optimize_bbox");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "none");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - no optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (SLOPPY_ARC)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.SLOPPY_ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (SLOPPY_ARC) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (SLOPPY_ARC)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.SLOPPY_ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (SLOPPY_ARC) " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (PLANE)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.PLANE, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (PLANE) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (PLANE)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.PLANE, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (PLANE) " + (totalTime / NUM_RUNS) + "ms");
node.close();
}
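// Runs one count-style search for docs within 2km of the New York point, using the given distance algorithm and optimize_bbox mode.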
public static void run(Client client, GeoDistance geoDistance, String optimizeBbox) {
client.prepareSearch() // from NY
.setSize(0)
.setQuery(boolQuery().must(matchAllQuery()).filter(geoDistanceQuery("location")
.distance("2km")
.optimizeBbox(optimizeBbox)
.geoDistance(geoDistance)
.point(40.7143528, -74.0059731)))
.execute().actionGet();
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
private static void exit(int status) {
System.exit(status);
}
}

View File

@ -1,189 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.nested;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
 * Benchmarks match_all searches sorted by a field inside nested objects.
 */
public class NestedSearchBenchMark {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.build();
Node node1 = new Node(settingsBuilder().put(settings).put("name", "node1").build()).start();
Client client = node1.client();
int count = (int) SizeValue.parseSizeValue("1m").singles();
int nestedCount = 10;
int rootDocs = count / nestedCount;
int batch = 100;
int queryWarmup = 5;
int queryCount = 500;
String indexName = "test";
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
.setWaitForGreenStatus().execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
try {
client.admin().indices().prepareCreate(indexName)
.addMapping("type", XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.startObject("properties")
.startObject("field1")
.field("type", "integer")
.endObject()
.startObject("field2")
.field("type", "nested")
.startObject("properties")
.startObject("field3")
.field("type", "integer")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
).execute().actionGet();
clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
StopWatch stopWatch = new StopWatch().start();
System.out.println("--> Indexing [" + rootDocs + "] root documents and [" + (rootDocs * nestedCount) + "] nested objects");
long ITERS = rootDocs / batch;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client.prepareBulk();
for (int j = 0; j < batch; j++) {
counter++;
XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
.field("field1", counter)
.startArray("field2");
for (int k = 0; k < nestedCount; k++) {
doc = doc.startObject()
.field("field3", k)
.endObject();
}
doc = doc.endArray();
request.add(
Requests.indexRequest(indexName).type("type").id(Integer.toString(counter)).source(doc)
);
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("--> failures...");
}
if (((i * batch) % 10000) == 0) {
System.out.println("--> Indexed " + (i * batch) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (count * (1 + nestedCount))) / stopWatch.totalTime().secondsFrac()));
} catch (Exception e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh().execute().actionGet();
System.out.println("--> Number of docs in index: " + client.prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits());
NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
System.out.println("--> Running match_all with sorting on nested field");
// warm up the nested sort first
for (int j = 0; j < queryWarmup; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addSort(
SortBuilders.fieldSort("field2.field3")
.setNestedPath("field2")
.sortMode("avg")
.order(SortOrder.ASC)
)
.execute().actionGet();
if (j == 0) {
System.out.println("--> Warmup took: " + searchResponse.getTook());
}
if (searchResponse.getHits().totalHits() != rootDocs) {
System.err.println("--> mismatch on hits");
}
}
long totalQueryTime = 0;
for (int j = 0; j < queryCount; j++) {
SearchResponse searchResponse = client.prepareSearch()
.setQuery(matchAllQuery())
.addSort(
SortBuilders.fieldSort("field2.field3")
.setNestedPath("field2")
.sortMode("avg")
.order(j % 2 == 0 ? SortOrder.ASC : SortOrder.DESC)
)
.execute().actionGet();
if (searchResponse.getHits().totalHits() != rootDocs) {
System.err.println("--> mismatch on hits");
}
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> Sorting by nested fields took: " + (totalQueryTime / queryCount) + "ms");
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).execute().actionGet();
System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
}
}

View File

@ -1,153 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.search.scroll;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.monitor.jvm.JvmStats;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortOrder;
import java.util.Locale;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
/**
 * Benchmarks sorted scroll requests page by page over a three-node, three-shard index.
 */
public class ScrollSearchBenchmark {
// Run with: -Xms1G -Xmx1G
public static void main(String[] args) {
String indexName = "test";
String typeName = "type";
long numDocs = SizeValue.parseSizeValue("300k").singles();
int requestSize = 50;
Settings settings = settingsBuilder()
.put(SETTING_NUMBER_OF_SHARDS, 3)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
.put("cluster.name", ScrollSearchBenchmark.class.getSimpleName())
.build();
Node[] nodes = new Node[3];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new Node(settingsBuilder().put(settings).put("name", "node" + i).build()).start();
}
Client client = nodes[0].client();
try {
client.admin().indices().prepareCreate(indexName).get();
for (int counter = 1; counter <= numDocs;) {
BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
for (int bulkCounter = 0; bulkCounter < 100; bulkCounter++) {
if (counter > numDocs) {
break;
}
bulkRequestBuilder.add(
client.prepareIndex(indexName, typeName, String.valueOf(counter))
.setSource("field1", counter++)
);
}
int indexedDocs = counter - 1;
if (indexedDocs % 100000 == 0) {
System.out.printf(Locale.ENGLISH, "--> Indexed %d so far\n", indexedDocs);
}
bulkRequestBuilder.get();
}
} catch (IndexAlreadyExistsException e) {
System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> Timed out waiting for cluster health");
}
}
client.admin().indices().prepareRefresh(indexName).get();
System.out.printf(Locale.ENGLISH, "--> Number of docs in index: %d\n", client.prepareSearch().setSize(0).get().getHits().totalHits());
Long counter = numDocs;
SearchResponse searchResponse = client.prepareSearch(indexName)
.addSort("field1", SortOrder.DESC)
.setSize(requestSize)
.setScroll("10m").get();
if (searchResponse.getHits().getTotalHits() != numDocs) {
System.err.printf(Locale.ENGLISH, "Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
}
if (searchResponse.getHits().hits().length != requestSize) {
System.err.printf(Locale.ENGLISH, "Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
}
for (SearchHit hit : searchResponse.getHits()) {
if (!hit.sortValues()[0].equals(counter--)) {
System.err.printf(Locale.ENGLISH, "Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
}
}
String scrollId = searchResponse.getScrollId();
int scrollRequestCounter = 0;
long sumTimeSpent = 0;
while (true) {
long timeSpent = System.currentTimeMillis();
searchResponse = client.prepareSearchScroll(scrollId).setScroll("10m").get();
sumTimeSpent += (System.currentTimeMillis() - timeSpent);
scrollRequestCounter++;
if (searchResponse.getHits().getTotalHits() != numDocs) {
System.err.printf(Locale.ENGLISH, "Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
}
if (scrollRequestCounter % 20 == 0) {
long avgTimeSpent = sumTimeSpent / 20;
JvmStats.Mem mem = JvmStats.jvmStats().getMem();
System.out.printf(Locale.ENGLISH, "Cursor location=%d, avg time spent=%d ms\n", (requestSize * scrollRequestCounter), (avgTimeSpent));
System.out.printf(Locale.ENGLISH, "heap max=%s, used=%s, percentage=%d\n", mem.getHeapMax(), mem.getHeapUsed(), mem.getHeapUsedPercent());
sumTimeSpent = 0;
}
if (searchResponse.getHits().hits().length == 0) {
break;
}
if (searchResponse.getHits().hits().length != requestSize) {
System.err.printf(Locale.ENGLISH, "Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
}
for (SearchHit hit : searchResponse.getHits()) {
if (!hit.sortValues()[0].equals(counter--)) {
System.err.printf(Locale.ENGLISH, "Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
}
}
scrollId = searchResponse.getScrollId();
}
if (counter != 0) {
System.err.printf(Locale.ENGLISH, "Counter should be 0 because scroll has been consumed\n");
}
for (Node node : nodes) {
node.close();
}
}
}


@ -1,70 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.time;
import org.elasticsearch.common.StopWatch;
import java.util.concurrent.CountDownLatch;
/**
* Measures the raw call cost of System.currentTimeMillis() versus System.nanoTime(),
* single-threaded first and then from NUMBER_OF_THREADS concurrent threads.
*/
public class SimpleTimeBenchmark {
private static boolean USE_NANO_TIME = false;
private static long NUMBER_OF_ITERATIONS = 1000000;
private static int NUMBER_OF_THREADS = 100;
public static void main(String[] args) throws Exception {
StopWatch stopWatch = new StopWatch().start();
System.out.println("Running " + NUMBER_OF_ITERATIONS);
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.currentTimeMillis();
}
System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
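// Fan the same timing loop out across NUMBER_OF_THREADS threads; the latch releases when all finish.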
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
if (USE_NANO_TIME) {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.nanoTime();
}
} else {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
System.currentTimeMillis();
}
}
latch.countDown();
}
});
}
stopWatch = new StopWatch().start();
for (Thread thread : threads) {
thread.start();
}
latch.await();
stopWatch.stop();
System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
}
}


@ -1,59 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.transport;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;
import java.io.IOException;
/**
* Transport request carrying an id and an opaque byte payload, used by the transport benchmarks.
*/
public class BenchmarkMessageRequest extends TransportRequest {
long id;
byte[] payload;
public BenchmarkMessageRequest(long id, byte[] payload) {
this.id = id;
this.payload = payload;
}
public BenchmarkMessageRequest() {
}
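// Wire format: 8-byte id, then a vInt payload length followed by the payload bytes.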
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
id = in.readLong();
payload = new byte[in.readVInt()];
in.readFully(payload);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(id);
out.writeVInt(payload.length);
out.writeBytes(payload);
}
}


@ -1,72 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.transport;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportResponse;
import java.io.IOException;
/**
* Transport response that echoes back the id and payload of a {@link BenchmarkMessageRequest}.
*/
public class BenchmarkMessageResponse extends TransportResponse {
long id;
byte[] payload;
public BenchmarkMessageResponse(BenchmarkMessageRequest request) {
this.id = request.id;
this.payload = request.payload;
}
public BenchmarkMessageResponse(long id, byte[] payload) {
this.id = id;
this.payload = payload;
}
public BenchmarkMessageResponse() {
}
public long id() {
return id;
}
public byte[] payload() {
return payload;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
id = in.readLong();
payload = new byte[in.readVInt()];
in.readFully(payload);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(id);
out.writeVInt(payload.length);
out.writeBytes(payload);
}
}


@ -1,146 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.transport;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.netty.NettyTransport;
import java.net.InetAddress;
import java.util.concurrent.CountDownLatch;
/**
* Pushes large (10 MB) payloads over the Netty transport from several concurrent
* clients while a separate thread times a small control message on the same connection.
*/
public class BenchmarkNettyLargeMessages {
public static void main(String[] args) throws Exception {
final ByteSizeValue payloadSize = new ByteSizeValue(10, ByteSizeUnit.MB);
final int NUMBER_OF_ITERATIONS = 100000;
final int NUMBER_OF_CLIENTS = 5;
final byte[] payload = new byte[(int) payloadSize.bytes()];
Settings settings = Settings.settingsBuilder()
.build();
NetworkService networkService = new NetworkService(settings);
final ThreadPool threadPool = new ThreadPool("BenchmarkNettyLargeMessages");
final TransportService transportServiceServer = new TransportService(
new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry()), threadPool
).start();
final TransportService transportServiceClient = new TransportService(
new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry()), threadPool
).start();
final DiscoveryNode bigNode = new DiscoveryNode("big", new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300), Version.CURRENT);
// final DiscoveryNode smallNode = new DiscoveryNode("small", new InetSocketTransportAddress("localhost", 9300));
final DiscoveryNode smallNode = bigNode;
transportServiceClient.connectToNode(bigNode);
transportServiceClient.connectToNode(smallNode);
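// The server-side handler simply echoes each request back as a response.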
transportServiceServer.registerRequestHandler("benchmark", BenchmarkMessageRequest::new, ThreadPool.Names.GENERIC, new TransportRequestHandler<BenchmarkMessageRequest>() {
@Override
public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(new BenchmarkMessageResponse(request));
}
});
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS);
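// Each client thread synchronously sends NUMBER_OF_ITERATIONS 10 MB bulk-type requests;
// the thread below times a single empty state-type message competing for the same node.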
for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
transportServiceClient.submitRequest(bigNode, "benchmark", message, TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK).build(), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet();
}
latch.countDown();
}
}).start();
}
new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 1; i++) {
BenchmarkMessageRequest message = new BenchmarkMessageRequest(2, BytesRef.EMPTY_BYTES);
long start = System.currentTimeMillis();
transportServiceClient.submitRequest(smallNode, "benchmark", message, TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE).build(), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet();
long took = System.currentTimeMillis() - start;
System.out.println("Took " + took + "ms");
}
}
}).start();
latch.await();
transportServiceClient.close();
transportServiceServer.close();
threadPool.shutdownNow();
}
}


@ -1,182 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.transport;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
/**
* Compares request/response throughput of the local and Netty transports:
* NUMBER_OF_CLIENTS threads each submit NUMBER_OF_ITERATIONS echo requests.
*/
public class TransportBenchmark {
enum Type {
LOCAL {
@Override
public Transport newTransport(Settings settings, ThreadPool threadPool) {
return new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry());
}
},
NETTY {
@Override
public Transport newTransport(Settings settings, ThreadPool threadPool) {
return new NettyTransport(settings, threadPool, new NetworkService(Settings.EMPTY), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry());
}
};
public abstract Transport newTransport(Settings settings, ThreadPool threadPool);
}
public static void main(String[] args) {
final String executor = ThreadPool.Names.GENERIC;
final boolean waitForRequest = true;
final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
final int NUMBER_OF_CLIENTS = 10;
final int NUMBER_OF_ITERATIONS = 100000;
final byte[] payload = new byte[(int) payloadSize.bytes()];
final AtomicLong idGenerator = new AtomicLong();
final Type type = Type.NETTY;
Settings settings = Settings.settingsBuilder()
.build();
final ThreadPool serverThreadPool = new ThreadPool("server");
final TransportService serverTransportService = new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool).start();
final ThreadPool clientThreadPool = new ThreadPool("client");
final TransportService clientTransportService = new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool).start();
final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT);
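// With Type.LOCAL the round trip stays in-JVM; with Type.NETTY it crosses a real socket.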
serverTransportService.registerRequestHandler("benchmark", BenchmarkMessageRequest::new, executor, new TransportRequestHandler<BenchmarkMessageRequest>() {
@Override
public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(new BenchmarkMessageResponse(request));
}
});
clientTransportService.connectToNode(node);
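// Warm up with 10,000 synchronous round trips before the timed run.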
for (int i = 0; i < 10000; i++) {
BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
}
}).txGet();
}
Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
clients[i] = new Thread(new Runnable() {
@Override
public void run() {
for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
final long id = idGenerator.incrementAndGet();
BenchmarkMessageRequest request = new BenchmarkMessageRequest(id, payload);
BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
@Override
public BenchmarkMessageResponse newInstance() {
return new BenchmarkMessageResponse();
}
@Override
public String executor() {
return executor;
}
@Override
public void handleResponse(BenchmarkMessageResponse response) {
if (response.id() != id) {
System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]");
}
latch.countDown();
}
@Override
public void handleException(TransportException exp) {
exp.printStackTrace();
latch.countDown();
}
};
if (waitForRequest) {
clientTransportService.submitRequest(node, "benchmark", request, handler).txGet();
} else {
clientTransportService.sendRequest(node, "benchmark", request, handler);
}
}
}
});
}
StopWatch stopWatch = new StopWatch().start();
for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
clients[i].start();
}
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
stopWatch.stop();
System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());
clientTransportService.close();
clientThreadPool.shutdownNow();
serverTransportService.close();
serverThreadPool.shutdownNow();
}
}


@ -1,158 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.transport.netty;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.*;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
public class NettyEchoBenchmark {
public static void main(String[] args) throws Exception {
final int payloadSize = 100;
final int CYCLE_SIZE = 50000;
final long NUMBER_OF_ITERATIONS = 500000;
ChannelBuffer message = ChannelBuffers.buffer(payloadSize);
for (int i = 0; i < message.capacity(); i++) {
message.writeByte((byte) i);
}
// Configure the server.
ServerBootstrap serverBootstrap = new ServerBootstrap(
new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()));
// Set up the pipeline factory.
serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(new EchoServerHandler());
}
});
// Bind and start to accept incoming connections.
serverBootstrap.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9000));
ClientBootstrap clientBootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()));
// ClientBootstrap clientBootstrap = new ClientBootstrap(
// new OioClientSocketChannelFactory(Executors.newCachedThreadPool()));
// Set up the pipeline factory.
final EchoClientHandler clientHandler = new EchoClientHandler();
clientBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(clientHandler);
}
});
// Start the connection attempt.
ChannelFuture future = clientBootstrap.connect(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9000));
future.awaitUninterruptibly();
Channel clientChannel = future.getChannel();
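// Lock-step echo: each write is answered by the server, and the client handler's latch
// ensures exactly one message is in flight at a time.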
System.out.println("Warming up...");
for (long i = 0; i < 10000; i++) {
clientHandler.latch = new CountDownLatch(1);
clientChannel.write(message);
try {
clientHandler.latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
System.out.println("Warmed up");
long start = System.currentTimeMillis();
long cycleStart = System.currentTimeMillis();
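// Timed run: print throughput for every CYCLE_SIZE round trips.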
for (long i = 1; i <= NUMBER_OF_ITERATIONS; i++) {
clientHandler.latch = new CountDownLatch(1);
clientChannel.write(message);
try {
clientHandler.latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
if ((i % CYCLE_SIZE) == 0) {
long cycleEnd = System.currentTimeMillis();
System.out.println("Ran 50000, TPS " + (CYCLE_SIZE / ((double) (cycleEnd - cycleStart) / 1000)));
cycleStart = cycleEnd;
}
}
long end = System.currentTimeMillis();
double seconds = (end - start) / 1000.0;
System.out.println("Ran [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + seconds + "s], TPS: " + NUMBER_OF_ITERATIONS / seconds);
clientChannel.close().awaitUninterruptibly();
clientBootstrap.releaseExternalResources();
serverBootstrap.releaseExternalResources();
}
public static class EchoClientHandler extends SimpleChannelUpstreamHandler {
public volatile CountDownLatch latch;
public EchoClientHandler() {
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
latch.countDown();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
e.getCause().printStackTrace();
e.getChannel().close();
}
}
public static class EchoServerHandler extends SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
e.getChannel().write(e.getMessage());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
// Close the connection when an exception is raised.
e.getCause().printStackTrace();
e.getChannel().close();
}
}
}


@ -1,65 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.uuid;
import org.elasticsearch.common.StopWatch;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
/**
* Measures UUID.randomUUID().toString() throughput, single-threaded and then
* across NUMBER_OF_THREADS concurrent threads.
*/
public class SimpleUuidBenchmark {
private static long NUMBER_OF_ITERATIONS = 10000;
private static int NUMBER_OF_THREADS = 100;
public static void main(String[] args) throws Exception {
StopWatch stopWatch = new StopWatch().start();
System.out.println("Running " + NUMBER_OF_ITERATIONS);
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
UUID.randomUUID().toString();
}
System.out.println("Generated in " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
System.out.println("Generating using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
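// Same generation loop fanned out across NUMBER_OF_THREADS threads.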
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
UUID.randomUUID().toString();
}
latch.countDown();
}
});
}
stopWatch = new StopWatch().start();
for (Thread thread : threads) {
thread.start();
}
latch.await();
stopWatch.stop();
System.out.println("Generate in " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
}
}


@ -1,99 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent.support.filtering;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import java.io.IOException;
import java.util.Arrays;
import java.util.Locale;
/**
* Benchmark class to compare filtered and unfiltered XContent generators.
*/
public class FilteringJsonGeneratorBenchmark {
public static void main(String[] args) throws IOException {
final XContent XCONTENT = JsonXContent.jsonXContent;
System.out.println("Executing " + FilteringJsonGeneratorBenchmark.class + "...");
System.out.println("Warming up...");
run(XCONTENT, 500_000, 100, 0.5);
System.out.println("Warmed up.");
System.out.println("nb documents | nb fields | nb fields written | % fields written | time (millis) | rate (docs/sec) | avg size");
for (int nbFields : Arrays.asList(10, 25, 50, 100, 250)) {
for (int nbDocs : Arrays.asList(100, 1000, 10_000, 100_000, 500_000)) {
for (double ratio : Arrays.asList(0.0, 1.0, 0.99, 0.95, 0.9, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01)) {
run(XCONTENT, nbDocs, nbFields, ratio);
}
}
}
System.out.println("Done.");
}
private static void run(XContent xContent, long nbIterations, int nbFields, double ratio) throws IOException {
String[] fields = fields(nbFields);
String[] filters = fields((int) (nbFields * ratio));
long size = 0;
BytesStreamOutput os = new BytesStreamOutput();
long start = System.nanoTime();
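// Each iteration serializes one flat document of nbFields numeric fields, with the
// generator filtered to the first (nbFields * ratio) field names; the stream is reused.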
for (int i = 0; i < nbIterations; i++) {
XContentBuilder builder = new XContentBuilder(xContent, os, filters);
builder.startObject();
for (String field : fields) {
builder.field(field, System.nanoTime());
}
builder.endObject();
size += builder.bytes().length();
os.reset();
}
double milliseconds = (System.nanoTime() - start) / 1_000_000d;
System.out.printf(Locale.ROOT, "%12d | %9d | %17d | %14.2f %% | %10.3f ms | %15.2f | %8.0f %n",
nbIterations, nbFields,
(int) (nbFields * ratio),
(ratio * 100d),
milliseconds,
((double) nbIterations) / (milliseconds / 1000d),
size / ((double) nbIterations));
}
/**
* Returns a String array of field names starting from "field_0" with a length of n.
* If n=3, the array is ["field_0","field_1","field_2"]
*/
private static String[] fields(int n) {
String[] fields = new String[n];
for (int i = 0; i < n; i++) {
fields[i] = "field_" + i;
}
return fields;
}
}