Merge pull request #13291 from rjernst/no_test_exclusions2

Remove stress tests
Ryan Ernst 2015-09-02 13:45:06 -07:00
commit e40b2d5dd7
23 changed files with 2 additions and 2972 deletions


@@ -59,16 +59,11 @@ public class NamingConventionTests extends ESTestCase {
for (final String packageName : packages) {
final String path = "/" + packageName.replace('.', '/');
final Path startPath = getDataPath(path);
- final Set<Path> ignore = Sets.newHashSet(PathUtils.get("/org/elasticsearch/stresstest"), PathUtils.get("/org/elasticsearch/benchmark/stress"));
Files.walkFileTree(startPath, new FileVisitor<Path>() {
private Path pkgPrefix = PathUtils.get(path).getParent();
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
- Path next = pkgPrefix.resolve(dir.getFileName());
- if (ignore.contains(next)) {
- return FileVisitResult.SKIP_SUBTREE;
- }
- pkgPrefix = next;
+ pkgPrefix = pkgPrefix.resolve(dir.getFileName());
return FileVisitResult.CONTINUE;
}
@@ -170,7 +165,7 @@ public class NamingConventionTests extends ESTestCase {
innerClasses.isEmpty());
assertTrue("Pure Unit-Test found must subclass one of [" + classesToSubclass +"]:\n" + Joiner.on('\n').join(pureUnitTest),
pureUnitTest.isEmpty());
assertTrue("Classes ending with Test|Tests] must subclass [" + classesToSubclass +"]:\n" + Joiner.on('\n').join(notImplementing),
assertTrue("Classes ending with [Test|Tests] must subclass [" + classesToSubclass +"]:\n" + Joiner.on('\n').join(notImplementing),
notImplementing.isEmpty());
assertTrue("Subclasses of ESIntegTestCase should end with IT as they are integration tests:\n" + Joiner.on('\n').join(integTestsInDisguise),
integTestsInDisguise.isEmpty());


@@ -1,282 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.stress;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.node.Node;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
/**
*
*/
public class NodesStressTest {
private Node[] nodes;
private int numberOfNodes = 2;
private Client[] clients;
private AtomicLong idGenerator = new AtomicLong();
private int fieldNumLimit = 50;
private long searcherIterations = 10;
private Searcher[] searcherThreads = new Searcher[1];
private long indexIterations = 10;
private Indexer[] indexThreads = new Indexer[1];
private TimeValue sleepAfterDone = TimeValue.timeValueMillis(0);
private TimeValue sleepBeforeClose = TimeValue.timeValueMillis(0);
private CountDownLatch latch;
private CyclicBarrier barrier1;
private CyclicBarrier barrier2;
public NodesStressTest() {
}
public NodesStressTest numberOfNodes(int numberOfNodes) {
this.numberOfNodes = numberOfNodes;
return this;
}
public NodesStressTest fieldNumLimit(int fieldNumLimit) {
this.fieldNumLimit = fieldNumLimit;
return this;
}
public NodesStressTest searchIterations(int searchIterations) {
this.searcherIterations = searchIterations;
return this;
}
public NodesStressTest searcherThreads(int numberOfSearcherThreads) {
searcherThreads = new Searcher[numberOfSearcherThreads];
return this;
}
public NodesStressTest indexIterations(long indexIterations) {
this.indexIterations = indexIterations;
return this;
}
public NodesStressTest indexThreads(int numberOfWriterThreads) {
indexThreads = new Indexer[numberOfWriterThreads];
return this;
}
public NodesStressTest sleepAfterDone(TimeValue time) {
this.sleepAfterDone = time;
return this;
}
public NodesStressTest sleepBeforeClose(TimeValue time) {
this.sleepBeforeClose = time;
return this;
}
public NodesStressTest build(Settings settings) throws Exception {
settings = settingsBuilder()
// .put("index.refresh_interval", 1, TimeUnit.SECONDS)
.put(SETTING_NUMBER_OF_SHARDS, 5)
.put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(settings)
.build();
nodes = new Node[numberOfNodes];
clients = new Client[numberOfNodes];
for (int i = 0; i < numberOfNodes; i++) {
nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
clients[i] = nodes[i].client();
}
for (int i = 0; i < searcherThreads.length; i++) {
searcherThreads[i] = new Searcher(i);
}
for (int i = 0; i < indexThreads.length; i++) {
indexThreads[i] = new Indexer(i);
}
latch = new CountDownLatch(1);
barrier1 = new CyclicBarrier(2);
barrier2 = new CyclicBarrier(2);
// warmup
StopWatch stopWatch = new StopWatch().start();
Indexer warmup = new Indexer(-1).max(10000);
warmup.start();
barrier1.await();
barrier2.await();
latch.await();
stopWatch.stop();
System.out.println("Done Warmup, took [" + stopWatch.totalTime() + "]");
latch = new CountDownLatch(searcherThreads.length + indexThreads.length);
barrier1 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
barrier2 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
return this;
}
public void start() throws Exception {
for (Thread t : searcherThreads) {
t.start();
}
for (Thread t : indexThreads) {
t.start();
}
barrier1.await();
StopWatch stopWatch = new StopWatch();
stopWatch.start();
barrier2.await();
latch.await();
stopWatch.stop();
System.out.println("Done, took [" + stopWatch.totalTime() + "]");
System.out.println("Sleeping before close: " + sleepBeforeClose);
Thread.sleep(sleepBeforeClose.millis());
for (Client client : clients) {
client.close();
}
for (Node node : nodes) {
node.close();
}
System.out.println("Sleeping before exit: " + sleepBeforeClose);
Thread.sleep(sleepAfterDone.millis());
}
class Searcher extends Thread {
final int id;
long counter = 0;
long max = searcherIterations;
Searcher(int id) {
super("Searcher" + id);
this.id = id;
}
@Override
public void run() {
try {
barrier1.await();
barrier2.await();
for (; counter < max; counter++) {
Client client = client(counter);
QueryBuilder query = termQuery("num", counter % fieldNumLimit);
query = constantScoreQuery(query);
SearchResponse search = client.search(searchRequest()
.source(searchSource().query(query)))
.actionGet();
// System.out.println("Got search response, hits [" + search.hits().totalHits() + "]");
}
} catch (Exception e) {
System.err.println("Failed to search:");
e.printStackTrace();
} finally {
latch.countDown();
}
}
}
class Indexer extends Thread {
final int id;
long counter = 0;
long max = indexIterations;
Indexer(int id) {
super("Indexer" + id);
this.id = id;
}
Indexer max(int max) {
this.max = max;
return this;
}
@Override
public void run() {
try {
barrier1.await();
barrier2.await();
for (; counter < max; counter++) {
Client client = client(counter);
long id = idGenerator.incrementAndGet();
client.index(Requests.indexRequest().index("test").type("type1").id(Long.toString(id))
.source(XContentFactory.jsonBuilder().startObject()
.field("num", id % fieldNumLimit)
.endObject()))
.actionGet();
}
System.out.println("Indexer [" + id + "]: Done");
} catch (Exception e) {
System.err.println("Failed to index:");
e.printStackTrace();
} finally {
latch.countDown();
}
}
}
private Client client(long i) {
return clients[((int) (i % clients.length))];
}
public static void main(String[] args) throws Exception {
NodesStressTest test = new NodesStressTest()
.numberOfNodes(2)
.indexThreads(5)
.indexIterations(10 * 1000)
.searcherThreads(5)
.searchIterations(10 * 1000)
.sleepBeforeClose(TimeValue.timeValueMinutes(10))
.sleepAfterDone(TimeValue.timeValueMinutes(10))
.build(EMPTY_SETTINGS);
test.start();
}
}


@@ -1,123 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.stress;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import java.io.IOException;
import java.util.Random;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
/**
*
*/
public class SingleThreadBulkStress {
public static void main(String[] args) throws Exception {
Random random = new Random();
int shardsCount = Integer.parseInt(System.getProperty("es.shards", "1"));
int replicaCount = Integer.parseInt(System.getProperty("es.replica", "1"));
boolean autoGenerateId = true;
Settings settings = settingsBuilder()
.put("index.refresh_interval", "1s")
.put("index.merge.async", true)
.put("index.translog.flush_threshold_ops", 5000)
.put(SETTING_NUMBER_OF_SHARDS, shardsCount)
.put(SETTING_NUMBER_OF_REPLICAS, replicaCount)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
}
//Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
Node client = nodes[0];
Client client1 = client.client();
Thread.sleep(1000);
client1.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("_source").field("enabled", false).endObject()
.startObject("_all").field("enabled", false).endObject()
.startObject("_type").field("index", "no").endObject()
.startObject("_id").field("index", "no").endObject()
.startObject("properties")
.startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
// .startObject("field").field("index", "analyzed").field("omit_norms", false).endObject()
.endObject()
.endObject().endObject()).execute().actionGet();
Thread.sleep(5000);
StopWatch stopWatch = new StopWatch().start();
long COUNT = SizeValue.parseSizeValue("2m").singles();
int BATCH = 500;
System.out.println("Indexing [" + COUNT + "] ...");
long ITERS = COUNT / BATCH;
long i = 1;
int counter = 0;
for (; i <= ITERS; i++) {
BulkRequestBuilder request = client1.prepareBulk();
for (int j = 0; j < BATCH; j++) {
counter++;
request.add(Requests.indexRequest("test").type("type1").id(autoGenerateId ? null : Integer.toString(counter)).source(source(Integer.toString(counter), "test" + counter)));
}
BulkResponse response = request.execute().actionGet();
if (response.hasFailures()) {
System.err.println("failures...");
}
if (((i * BATCH) % 10000) == 0) {
System.out.println("Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
client.client().admin().indices().prepareRefresh().execute().actionGet();
System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
client.close();
for (Node node : nodes) {
node.close();
}
}
private static XContentBuilder source(String id, String nameValue) throws IOException {
return jsonBuilder().startObject().field("field", nameValue).endObject();
}
}


@@ -1,108 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.benchmark.stress;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;
import java.io.IOException;
import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
/**
*
*/
public class SingleThreadIndexingStress {
public static void main(String[] args) throws Exception {
Settings settings = settingsBuilder()
.put("index.refresh_interval", "1s")
.put("index.merge.async", true)
.put("index.translog.flush_threshold_ops", 5000)
.put(SETTING_NUMBER_OF_SHARDS, 2)
.put(SETTING_NUMBER_OF_REPLICAS, 1)
.build();
Node[] nodes = new Node[1];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
}
Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
Client client1 = client.client();
Thread.sleep(1000);
client1.admin().indices().create(createIndexRequest("test")).actionGet();
Thread.sleep(5000);
StopWatch stopWatch = new StopWatch().start();
int COUNT = 200000;
int ID_RANGE = 100;
System.out.println("Indexing [" + COUNT + "] ...");
int i = 1;
for (; i <= COUNT; i++) {
// client1.admin().cluster().preparePingSingle("test", "type1", Integer.toString(i)).execute().actionGet();
client1.prepareIndex("test", "type1").setId(Integer.toString(i % ID_RANGE)).setSource(source(Integer.toString(i), "test" + i))
.setCreate(false).execute().actionGet();
if ((i % 10000) == 0) {
System.out.println("Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
stopWatch.start();
}
}
System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
client.client().admin().indices().prepareRefresh().execute().actionGet();
System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
client.close();
for (Node node : nodes) {
node.close();
}
}
private static XContentBuilder source(String id, String nameValue) throws IOException {
long time = System.currentTimeMillis();
return jsonBuilder().startObject()
.field("id", id)
// .field("numeric1", time)
// .field("numeric2", time)
// .field("numeric3", time)
// .field("numeric4", time)
// .field("numeric5", time)
// .field("numeric6", time)
// .field("numeric7", time)
// .field("numeric8", time)
// .field("numeric9", time)
// .field("numeric10", time)
.field("name", nameValue)
.endObject();
}
}


@@ -1,90 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.client;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.net.InetAddress;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
*/
public class ClientFailover {
public static void main(String[] args) throws Exception {
Node[] nodes = new Node[3];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().node();
}
// TODO: what is this? a public static void main test?!?!
final TransportClient client = TransportClient.builder().build()
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300))
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9301))
.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9302));
final AtomicBoolean done = new AtomicBoolean();
final AtomicLong indexed = new AtomicLong();
final CountDownLatch latch = new CountDownLatch(1);
Thread indexer = new Thread(new Runnable() {
@Override
public void run() {
while (!done.get()) {
try {
client.prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
indexed.incrementAndGet();
} catch (Exception e) {
e.printStackTrace();
}
}
latch.countDown();
}
});
indexer.start();
for (int i = 0; i < 100; i++) {
int index = i % nodes.length;
nodes[index].close();
ClusterHealthResponse health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
if (health.isTimedOut()) {
System.err.println("timed out on health");
}
nodes[index] = NodeBuilder.nodeBuilder().node();
health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
if (health.isTimedOut()) {
System.err.println("timed out on health");
}
}
latch.await();
// TODO add verification to the number of indexed docs
}
}


@@ -1,222 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.fullrestart;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
*
*/
public class FullRestartStressTest {
private final ESLogger logger = Loggers.getLogger(getClass());
private int numberOfNodes = 4;
private int numberOfIndices = 5;
private int textTokens = 150;
private int numberOfFields = 10;
private int bulkSize = 1000;
private int numberOfDocsPerRound = 50000;
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private TimeValue period = TimeValue.timeValueMinutes(20);
private AtomicLong indexCounter = new AtomicLong();
public FullRestartStressTest numberOfNodes(int numberOfNodes) {
this.numberOfNodes = numberOfNodes;
return this;
}
public FullRestartStressTest numberOfIndices(int numberOfIndices) {
this.numberOfIndices = numberOfIndices;
return this;
}
public FullRestartStressTest textTokens(int textTokens) {
this.textTokens = textTokens;
return this;
}
public FullRestartStressTest numberOfFields(int numberOfFields) {
this.numberOfFields = numberOfFields;
return this;
}
public FullRestartStressTest bulkSize(int bulkSize) {
this.bulkSize = bulkSize;
return this;
}
public FullRestartStressTest numberOfDocsPerRound(int numberOfDocsPerRound) {
this.numberOfDocsPerRound = numberOfDocsPerRound;
return this;
}
public FullRestartStressTest settings(Settings settings) {
this.settings = settings;
return this;
}
public FullRestartStressTest period(TimeValue period) {
this.period = period;
return this;
}
public void run() throws Exception {
long numberOfRounds = 0;
Random random = new Random(0);
long testStart = System.currentTimeMillis();
while (true) {
Node[] nodes = new Node[numberOfNodes];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
}
Node client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
// verify that the indices are there
for (int i = 0; i < numberOfIndices; i++) {
try {
client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
} catch (Exception e) {
// might already exists, fine
}
}
logger.info("*** Waiting for GREEN status");
try {
ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealth.isTimedOut()) {
logger.warn("timed out waiting for green status....");
}
} catch (Exception e) {
logger.warn("failed to execute cluster health....");
}
CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
logger.info("*** index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
// verify count
for (int i = 0; i < (nodes.length * 5); i++) {
count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
logger.debug("index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
if (count.getCount() != indexCounter.get()) {
logger.warn("!!! count does not match, index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
throw new Exception("failed test, count does not match...");
}
}
// verify search
for (int i = 0; i < (nodes.length * 5); i++) {
// do a search with norms field, so we don't rely on match all filtering cache
SearchResponse search = client.client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
logger.debug("index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
if (count.getCount() != indexCounter.get()) {
logger.warn("!!! search does not match, index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
throw new Exception("failed test, count does not match...");
}
}
logger.info("*** ROUND {}", ++numberOfRounds);
// bulk index data
int numberOfBulks = numberOfDocsPerRound / bulkSize;
for (int b = 0; b < numberOfBulks; b++) {
BulkRequestBuilder bulk = client.client().prepareBulk();
for (int k = 0; k < bulkSize; k++) {
StringBuilder sb = new StringBuilder();
XContentBuilder json = XContentFactory.jsonBuilder().startObject()
.field("field", "value" + ThreadLocalRandom.current().nextInt());
int fields = ThreadLocalRandom.current().nextInt() % numberOfFields;
for (int i = 0; i < fields; i++) {
json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
int tokens = ThreadLocalRandom.current().nextInt() % textTokens;
sb.setLength(0);
for (int j = 0; j < tokens; j++) {
sb.append(Strings.randomBase64UUID(random)).append(' ');
}
json.field("text_" + i, sb.toString());
}
json.endObject();
bulk.add(Requests.indexRequest("test" + (Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfIndices)).type("type1").source(json));
indexCounter.incrementAndGet();
}
bulk.execute().actionGet();
}
client.close();
for (Node node : nodes) {
node.close();
}
if ((System.currentTimeMillis() - testStart) > period.millis()) {
logger.info("test finished, full_restart_rounds [{}]", numberOfRounds);
break;
}
}
}
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
int numberOfNodes = 2;
Settings settings = Settings.settingsBuilder()
.put("index.shard.check_on_startup", true)
.put("gateway.recover_after_nodes", numberOfNodes)
.put("index.number_of_shards", 1)
.put("path.data", "data/data1,data/data2")
.build();
FullRestartStressTest test = new FullRestartStressTest()
.settings(settings)
.period(TimeValue.timeValueMinutes(20))
.numberOfNodes(numberOfNodes)
.numberOfIndices(1)
.textTokens(150)
.numberOfFields(10)
.bulkSize(1000)
.numberOfDocsPerRound(10000);
test.run();
}
}


@@ -1,70 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.gcbehavior;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
public class FilterCacheGcStress {
public static void main(String[] args) {
Settings settings = Settings.EMPTY;
Node node = NodeBuilder.nodeBuilder().settings(settings).node();
final Client client = node.client();
client.admin().indices().prepareCreate("test").execute().actionGet();
client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
final AtomicBoolean stop = new AtomicBoolean();
Thread indexingThread = new Thread() {
@Override
public void run() {
while (!stop.get()) {
client.prepareIndex("test", "type1").setSource("field", System.currentTimeMillis()).execute().actionGet();
}
}
};
indexingThread.start();
Thread searchThread = new Thread() {
@Override
public void run() {
while (!stop.get()) {
client.prepareSearch()
.setQuery(filteredQuery(matchAllQuery(), rangeQuery("field").from(System.currentTimeMillis() - 1000000)))
.execute().actionGet();
}
}
};
searchThread.start();
}
}


@@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.get;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
public class GetStressTest {
public static void main(String[] args) throws Exception {
Settings settings = Settings.settingsBuilder()
.put("index.number_of_shards", 2)
.put("index.number_of_replicas", 1)
.build();
final int NUMBER_OF_NODES = 2;
final int NUMBER_OF_THREADS = 50;
final TimeValue TEST_TIME = TimeValue.parseTimeValue("10m", null, "TEST_TIME");
Node[] nodes = new Node[NUMBER_OF_NODES];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
}
final Node client = NodeBuilder.nodeBuilder()
.settings(settings)
.client(true)
.node();
client.client().admin().indices().prepareCreate("test").execute().actionGet();
final AtomicBoolean done = new AtomicBoolean();
final AtomicLong idGenerator = new AtomicLong();
final AtomicLong counter = new AtomicLong();
Thread[] threads = new Thread[NUMBER_OF_THREADS];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
ThreadLocalRandom random = ThreadLocalRandom.current();
while (!done.get()) {
String id = String.valueOf(idGenerator.incrementAndGet());
client.client().prepareIndex("test", "type1", id)
.setSource("field", random.nextInt(100))
.execute().actionGet();
GetResponse getResponse = client.client().prepareGet("test", "type1", id)
//.setFields(Strings.EMPTY_ARRAY)
.execute().actionGet();
if (!getResponse.isExists()) {
System.err.println("Failed to find " + id);
}
long count = counter.incrementAndGet();
if ((count % 10000) == 0) {
System.out.println("Executed " + count);
}
}
}
});
}
for (Thread thread : threads) {
thread.start();
}
Thread.sleep(TEST_TIME.millis());
System.out.println("test done.");
done.set(true);
}
}


@@ -1,106 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.get;
import com.google.common.collect.Sets;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
/**
*/
public class MGetStress1 {
public static void main(String[] args) throws Exception {
final int NUMBER_OF_NODES = 2;
final int NUMBER_OF_DOCS = 50000;
final int MGET_BATCH = 1000;
Node[] nodes = new Node[NUMBER_OF_NODES];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().node();
}
System.out.println("---> START Indexing initial data [" + NUMBER_OF_DOCS + "]");
final Client client = nodes[0].client();
for (int i = 0; i < NUMBER_OF_DOCS; i++) {
client.prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
}
System.out.println("---> DONE Indexing initial data [" + NUMBER_OF_DOCS + "]");
final AtomicBoolean done = new AtomicBoolean();
// start indexer
Thread indexer = new Thread(new Runnable() {
@Override
public void run() {
while (!done.get()) {
client.prepareIndex("test", "type", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
.setSource("field", "value").execute().actionGet();
}
}
});
indexer.start();
System.out.println("---> Starting indexer");
// start the mget one
Thread mget = new Thread(new Runnable() {
@Override
public void run() {
while (!done.get()) {
Set<String> ids = Sets.newHashSet();
for (int i = 0; i < MGET_BATCH; i++) {
ids.add(Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)));
}
//System.out.println("---> mget for [" + ids.size() + "]");
MultiGetResponse response = client.prepareMultiGet().add("test", "type", ids).execute().actionGet();
int expected = ids.size();
int count = 0;
for (MultiGetItemResponse item : response) {
count++;
if (item.isFailed()) {
System.err.println("item failed... " + item.getFailure());
} else {
boolean removed = ids.remove(item.getId());
if (!removed) {
System.err.println("got id twice " + item.getId());
}
}
}
if (expected != count) {
System.err.println("Expected [" + expected + "], got back [" + count + "]");
}
}
}
});
mget.start();
System.out.println("---> Starting mget");
Thread.sleep(TimeValue.timeValueMinutes(10).millis());
done.set(true);
}
}


@@ -1,71 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.indexing;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.concurrent.ThreadLocalRandom;
/**
*/
public class BulkIndexingStressTest {
public static void main(String[] args) {
final int NUMBER_OF_NODES = 4;
final int NUMBER_OF_INDICES = 600;
final int BATCH = 300;
final Settings nodeSettings = Settings.settingsBuilder().put("index.number_of_shards", 2).build();
// ESLogger logger = Loggers.getLogger("org.elasticsearch");
// logger.setLevel("DEBUG");
Node[] nodes = new Node[NUMBER_OF_NODES];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(nodeSettings).node();
}
Client client = nodes.length == 1 ? nodes[0].client() : nodes[1].client();
while (true) {
BulkRequestBuilder bulkRequest = client.prepareBulk();
for (int i = 0; i < BATCH; i++) {
bulkRequest.add(Requests.indexRequest("test" + ThreadLocalRandom.current().nextInt(NUMBER_OF_INDICES)).type("type").source("field", "value"));
}
BulkResponse bulkResponse = bulkRequest.execute().actionGet();
if (bulkResponse.hasFailures()) {
for (BulkItemResponse item : bulkResponse) {
if (item.isFailed()) {
System.out.println("failed response:" + item.getFailureMessage());
}
}
throw new RuntimeException("Failed responses");
}
;
}
}
}


@@ -1,118 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.indexing;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
/**
* Checks that index operation does not create duplicate documents.
*/
public class ConcurrentIndexingVersioningStressTest {
public static void main(String[] args) throws Exception {
Settings settings = Settings.EMPTY;
Node node1 = nodeBuilder().settings(settings).node();
Node node2 = nodeBuilder().settings(settings).node();
final Node client = nodeBuilder().settings(settings).client(true).node();
final int NUMBER_OF_DOCS = 10000;
final int NUMBER_OF_THREADS = 10;
final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
final long DELETE_EVERY = 10;
final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
Thread[] threads = new Thread[NUMBER_OF_THREADS];
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
if ((i % DELETE_EVERY) == 0) {
client.client().prepareDelete("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).execute().actionGet();
} else {
client.client().prepareIndex("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).setSource("field1", "value1").execute().actionGet();
}
}
} finally {
latch.countDown();
}
}
};
}
for (Thread thread : threads) {
thread.start();
}
latch.await();
System.out.println("done indexing, verifying docs");
client.client().admin().indices().prepareRefresh().execute().actionGet();
for (int i = 0; i < NUMBER_OF_DOCS; i++) {
String id = Integer.toString(i);
for (int j = 0; j < 5; j++) {
SearchResponse response = client.client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).execute().actionGet();
if (response.getHits().totalHits() > 1) {
System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
}
}
GetResponse getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
if (getResponse.isExists()) {
long version = getResponse.getVersion();
for (int j = 0; j < 5; j++) {
getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
if (!getResponse.isExists()) {
System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
break;
}
if (version != getResponse.getVersion()) {
System.err.println("[" + i + "] FAIL, DIFFERENT VERSIONS: [" + version + "], [" + getResponse.getVersion() + "]");
break;
}
}
} else {
for (int j = 0; j < 5; j++) {
getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
if (getResponse.isExists()) {
System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
break;
}
}
}
}
System.out.println("done.");
client.close();
node1.close();
node2.close();
}
}


@@ -1,47 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.leaks;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.jvm.JvmService;
import org.elasticsearch.monitor.os.OsService;
import org.elasticsearch.monitor.process.ProcessService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
public class GenericStatsLeak {
public static void main(String[] args) {
Node node = NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder()
.put("monitor.os.refresh_interval", 0)
.put("monitor.process.refresh_interval", 0)
).node();
JvmService jvmService = node.injector().getInstance(JvmService.class);
OsService osService = node.injector().getInstance(OsService.class);
ProcessService processService = node.injector().getInstance(ProcessService.class);
while (true) {
jvmService.stats();
osService.stats();
processService.stats();
}
}
}


@@ -1,35 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.leaks;
import org.elasticsearch.monitor.jvm.JvmStats;
/**
* This test mainly comes to check the native memory leak with getLastGCInfo (which is now
* disabled by default).
*/
public class JvmStatsLeak {
public static void main(String[] args) {
while (true) {
JvmStats.jvmStats();
}
}
}


@@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.manyindices;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.net.InetAddress;
import java.util.Date;
/**
*
*/
public class ManyIndicesRemoteStressTest {
private static final ESLogger logger = Loggers.getLogger(ManyIndicesRemoteStressTest.class);
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
int numberOfShards = 1;
int numberOfReplicas = 1;
int numberOfIndices = 1000;
int numberOfDocs = 1;
Client client;
Node node = null;
// TODO: what is this? a public static void main test?!?!?!
if (true) {
client = TransportClient.builder().settings(Settings.EMPTY).build().addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
} else {
node = NodeBuilder.nodeBuilder().client(true).node();
client = node.client();
}
for (int i = 0; i < numberOfIndices; i++) {
logger.info("START index [{}] ...", i);
client.admin().indices().prepareCreate("index_" + i)
.setSettings(Settings.settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas))
.execute().actionGet();
for (int j = 0; j < numberOfDocs; j++) {
client.prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
}
logger.info("DONE index [{}]", i);
}
logger.info("closing node...");
if (node != null) {
node.close();
}
logger.info("node closed");
}
}


@@ -1,98 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.manyindices;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.Date;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
*
*/
public class ManyIndicesStressTest {
private static final ESLogger logger = Loggers.getLogger(ManyIndicesStressTest.class);
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
int numberOfIndices = 100;
int numberOfDocs = 100;
Settings settings = Settings.settingsBuilder()
.put("index.shard.check_on_startup", false)
.put("index.number_of_shards", 1)
.build();
Node node = NodeBuilder.nodeBuilder().settings(settings).node();
for (int i = 0; i < numberOfIndices; i++) {
logger.info("START index [{}] ...", i);
node.client().admin().indices().prepareCreate("index_" + i).execute().actionGet();
for (int j = 0; j < numberOfDocs; j++) {
node.client().prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
}
logger.info("DONE index [{}] ...", i);
}
logger.info("closing node...");
node.close();
logger.info("node closed");
logger.info("starting node...");
node = NodeBuilder.nodeBuilder().settings(settings).node();
ClusterHealthResponse health = node.client().admin().cluster().prepareHealth().setTimeout("5m").setWaitForYellowStatus().execute().actionGet();
logger.info("health: " + health.getStatus());
logger.info("active shards: " + health.getActiveShards());
logger.info("active primary shards: " + health.getActivePrimaryShards());
if (health.isTimedOut()) {
logger.error("Timed out on health...");
}
ClusterState clusterState = node.client().admin().cluster().prepareState().execute().actionGet().getState();
for (int i = 0; i < numberOfIndices; i++) {
if (clusterState.blocks().indices().containsKey("index_" + i)) {
logger.error("index [{}] has blocks: {}", i, clusterState.blocks().indices().get("index_" + i));
}
}
for (int i = 0; i < numberOfIndices; i++) {
long count = node.client().prepareCount("index_" + i).setQuery(matchAllQuery()).execute().actionGet().getCount();
if (count == numberOfDocs) {
logger.info("VERIFIED [{}], count [{}]", i, count);
} else {
logger.error("FAILED [{}], expected [{}], got [{}]", i, numberOfDocs, count);
}
}
logger.info("closing node...");
node.close();
logger.info("node closed");
}
}


@@ -1,126 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.manyindices;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.ArrayList;
import java.util.List;
public class ManyNodesManyIndicesRecoveryStressTest {
public static void main(String[] args) throws Exception {
final int NUM_NODES = 40;
final int NUM_INDICES = 100;
final int NUM_DOCS = 2;
final int FLUSH_AFTER = 1;
final Settings nodeSettings = Settings.settingsBuilder()
.put("transport.netty.connections_per_node.low", 0)
.put("transport.netty.connections_per_node.med", 0)
.put("transport.netty.connections_per_node.high", 1)
.build();
final Settings indexSettings = Settings.settingsBuilder()
.put("index.number_of_shards", 1)
.build();
List<Node> nodes = new ArrayList<>();
for (int i = 0; i < NUM_NODES; i++) {
nodes.add(NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
}
Client client = nodes.get(0).client();
for (int index = 0; index < NUM_INDICES; index++) {
String indexName = "index_" + index;
System.out.println("--> Processing index [" + indexName + "]...");
client.admin().indices().prepareCreate(indexName).setSettings(indexSettings).execute().actionGet();
boolean flushed = false;
for (int doc = 0; doc < NUM_DOCS; doc++) {
if (!flushed && doc > FLUSH_AFTER) {
flushed = true;
client.admin().indices().prepareFlush(indexName).execute().actionGet();
}
client.prepareIndex(indexName, "type1", Integer.toString(doc)).setSource("field", "value" + doc).execute().actionGet();
}
System.out.println("--> DONE index [" + indexName + "]");
}
System.out.println("--> Initiating shutdown");
for (Node node : nodes) {
node.close();
}
System.out.println("--> Waiting for all nodes to be closed...");
while (true) {
boolean allAreClosed = true;
for (Node node : nodes) {
if (!node.isClosed()) {
allAreClosed = false;
break;
}
}
if (allAreClosed) {
break;
}
Thread.sleep(100);
}
System.out.println("Waiting a bit for node lock to really be released?");
Thread.sleep(5000);
System.out.println("--> All nodes are closed, starting back...");
nodes = new ArrayList<>();
for (int i = 0; i < NUM_NODES; i++) {
nodes.add(NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
}
client = nodes.get(0).client();
System.out.println("--> Waiting for green status");
while (true) {
ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
if (clusterHealth.isTimedOut()) {
System.err.println("--> cluster health timed out..., active shards [" + clusterHealth.getActiveShards() + "]");
} else {
break;
}
}
System.out.println("Verifying counts...");
for (int index = 0; index < NUM_INDICES; index++) {
String indexName = "index_" + index;
CountResponse count = client.prepareCount(indexName).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
if (count.getCount() != NUM_DOCS) {
System.err.println("Wrong count value, expected [" + NUM_DOCS + "], got [" + count.getCount() + "] for index [" + indexName + "]");
}
}
System.out.println("Test end");
for (Node node : nodes) {
node.close();
}
}
}


@@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.refresh;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.io.IOException;
import java.util.UUID;
/**
*/
public class RefreshStressTest1 {
public static void main(String[] args) throws InterruptedException, IOException {
int numberOfShards = 5;
Node node = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
Settings.settingsBuilder()
.put("node.name", "node1")
.put("index.number_of_shards", numberOfShards)
//.put("path.data", new File("target/data").getAbsolutePath())
.build()).node();
Node node2 = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
Settings.settingsBuilder()
.put("node.name", "node2")
.put("index.number_of_shards", numberOfShards)
//.put("path.data", new File("target/data").getAbsolutePath())
.build()).node();
Client client = node.client();
for (int loop = 1; loop < 1000; loop++) {
String indexName = "testindex" + loop;
String typeName = "testType" + loop;
String id = UUID.randomUUID().toString();
String mapping = "{ \"" + typeName + "\" : {\"dynamic_templates\" : [{\"no_analyze_strings\" : {\"match_mapping_type\" : \"string\",\"match\" : \"*\",\"mapping\" : {\"type\" : \"string\",\"index\" : \"not_analyzed\"}}}]}}";
client.admin().indices().prepareCreate(indexName).execute().actionGet();
client.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(mapping).execute().actionGet();
// sleep after put mapping
// Thread.sleep(100);
System.out.println("indexing " + loop);
String name = "name" + id;
client.prepareIndex(indexName, typeName, id).setSource("{ \"id\": \"" + id + "\", \"name\": \"" + name + "\" }").execute().actionGet();
client.admin().indices().prepareRefresh(indexName).execute().actionGet();
// sleep after refresh
// Thread.sleep(100);
System.out.println("searching " + loop);
SearchResponse result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
if (result.getHits().hits().length != 1) {
for (int i = 1; i <= 100; i++) {
System.out.println("retry " + loop + ", " + i + ", previous total hits: " + result.getHits().getTotalHits());
client.admin().indices().prepareRefresh(indexName).execute().actionGet();
Thread.sleep(100);
result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
if (result.getHits().hits().length == 1) {
client.admin().indices().prepareRefresh(indexName).execute().actionGet();
result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
throw new RuntimeException("Record found after " + (i * 100) + " ms, second go: " + result.getHits().hits().length);
} else if (i == 100) {
if (client.prepareGet(indexName, typeName, id).execute().actionGet().isExists())
throw new RuntimeException("Record wasn't found after 10s but can be get by id");
else throw new RuntimeException("Record wasn't found after 10s and can't be get by id");
}
}
}
//client.admin().indices().prepareDelete(indexName).execute().actionGet();
}
client.close();
node2.close();
node.close();
}
}

View File

@ -1,124 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.rollingrestart;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import java.util.Date;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
*/
public class QuickRollingRestartStressTest {
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
Random random = new Random();
Settings settings = Settings.settingsBuilder().build();
Node[] nodes = new Node[5];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
}
Node client = NodeBuilder.nodeBuilder().client(true).node();
long COUNT;
if (client.client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
throw new ElasticsearchException("failed to wait for green state on startup...");
}
COUNT = client.client().prepareCount().execute().actionGet().getCount();
System.out.println("--> existing index, count [" + COUNT + "]");
} else {
COUNT = SizeValue.parseSizeValue("100k").singles();
System.out.println("--> indexing data...");
for (long i = 0; i < COUNT; i++) {
client.client().prepareIndex("test", "type", Long.toString(i))
.setSource("date", new Date(), "data", RandomStrings.randomAsciiOfLength(random, 10000))
.execute().actionGet();
}
System.out.println("--> done indexing data [" + COUNT + "]");
client.client().admin().indices().prepareRefresh().execute().actionGet();
for (int i = 0; i < 10; i++) {
long count = client.client().prepareCount().execute().actionGet().getCount();
if (COUNT != count) {
System.err.println("--> the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
}
}
}
final int ROLLING_RESTARTS = 100;
System.out.println("--> starting rolling restarts [" + ROLLING_RESTARTS + "]");
for (int rollingRestart = 0; rollingRestart < ROLLING_RESTARTS; rollingRestart++) {
System.out.println("--> doing rolling restart [" + rollingRestart + "]...");
int nodeId = ThreadLocalRandom.current().nextInt();
for (int i = 0; i < nodes.length; i++) {
int nodeIdx = Math.abs(nodeId++) % nodes.length;
nodes[nodeIdx].close();
nodes[nodeIdx] = NodeBuilder.nodeBuilder().settings(settings).node();
}
System.out.println("--> done rolling restart [" + rollingRestart + "]");
System.out.println("--> waiting for green state now...");
ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForRelocatingShards(0).setTimeout("10m").execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("--> timed out waiting for green state...");
ClusterState state = client.client().admin().cluster().prepareState().execute().actionGet().getState();
System.out.println(state.nodes().prettyPrint());
System.out.println(state.routingTable().prettyPrint());
System.out.println(state.getRoutingNodes().prettyPrint());
throw new ElasticsearchException("timed out waiting for green state");
} else {
System.out.println("--> got green status");
}
System.out.println("--> checking data [" + rollingRestart + "]....");
boolean failed = false;
for (int i = 0; i < 10; i++) {
long count = client.client().prepareCount().execute().actionGet().getCount();
if (COUNT != count) {
failed = true;
System.err.println("--> ERROR the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
}
}
if (!failed) {
System.out.println("--> count verified");
}
}
System.out.println("--> shutting down...");
client.close();
for (Node node : nodes) {
node.close();
}
}
}

View File

@ -1,354 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.rollingrestart;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.search.SearchHit;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
/**
*
*/
public class RollingRestartStressTest {
private final ESLogger logger = Loggers.getLogger(getClass());
private int numberOfShards = 5;
private int numberOfReplicas = 1;
private int numberOfNodes = 4;
private int textTokens = 150;
private int numberOfFields = 10;
private long initialNumberOfDocs = 100000;
private int indexers = 0;
private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private TimeValue period = TimeValue.timeValueMinutes(20);
private boolean clearNodeData = true;
private Node client;
private AtomicLong indexCounter = new AtomicLong();
private AtomicLong idCounter = new AtomicLong();
public RollingRestartStressTest numberOfNodes(int numberOfNodes) {
this.numberOfNodes = numberOfNodes;
return this;
}
public RollingRestartStressTest numberOfShards(int numberOfShards) {
this.numberOfShards = numberOfShards;
return this;
}
public RollingRestartStressTest numberOfReplicas(int numberOfReplicas) {
this.numberOfReplicas = numberOfReplicas;
return this;
}
public RollingRestartStressTest initialNumberOfDocs(long initialNumberOfDocs) {
this.initialNumberOfDocs = initialNumberOfDocs;
return this;
}
public RollingRestartStressTest textTokens(int textTokens) {
this.textTokens = textTokens;
return this;
}
public RollingRestartStressTest numberOfFields(int numberOfFields) {
this.numberOfFields = numberOfFields;
return this;
}
public RollingRestartStressTest indexers(int indexers) {
this.indexers = indexers;
return this;
}
public RollingRestartStressTest indexerThrottle(TimeValue indexerThrottle) {
this.indexerThrottle = indexerThrottle;
return this;
}
public RollingRestartStressTest period(TimeValue period) {
this.period = period;
return this;
}
public RollingRestartStressTest cleanNodeData(boolean clearNodeData) {
this.clearNodeData = clearNodeData;
return this;
}
public RollingRestartStressTest settings(Settings settings) {
this.settings = settings;
return this;
}
public void run() throws Exception {
Random random = new Random(0);
Node[] nodes = new Node[numberOfNodes];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
}
client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
client.client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
.put("index.number_of_shards", numberOfShards)
.put("index.number_of_replicas", numberOfReplicas)
).execute().actionGet();
logger.info("********** [START] INDEXING INITIAL DOCS");
for (long i = 0; i < initialNumberOfDocs; i++) {
indexDoc(random);
}
logger.info("********** [DONE ] INDEXING INITIAL DOCS");
Indexer[] indexerThreads = new Indexer[indexers];
for (int i = 0; i < indexerThreads.length; i++) {
indexerThreads[i] = new Indexer();
}
for (int i = 0; i < indexerThreads.length; i++) {
indexerThreads[i].start();
}
long testStart = System.currentTimeMillis();
// start doing the rolling restart
int nodeIndex = 0;
while (true) {
Path[] nodeData = nodes[nodeIndex].injector().getInstance(NodeEnvironment.class).nodeDataPaths();
nodes[nodeIndex].close();
if (clearNodeData) {
try {
IOUtils.rm(nodeData);
} catch (Exception ex) {
logger.debug("Failed to delete node data directories", ex);
}
}
try {
ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
.setWaitForGreenStatus()
.setWaitForNodes(Integer.toString(numberOfNodes + 0 /* one data node is down; the client node keeps the total at numberOfNodes */))
.setWaitForRelocatingShards(0)
.setTimeout("10m").execute().actionGet();
if (clusterHealth.isTimedOut()) {
logger.warn("timed out waiting for green status....");
}
} catch (Exception e) {
logger.warn("failed to execute cluster health....");
}
nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();
Thread.sleep(1000);
try {
ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
.setWaitForGreenStatus()
.setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
.setWaitForRelocatingShards(0)
.setTimeout("10m").execute().actionGet();
if (clusterHealth.isTimedOut()) {
logger.warn("timed out waiting for green status....");
}
} catch (Exception e) {
logger.warn("failed to execute cluster health....");
}
if (++nodeIndex == nodes.length) {
nodeIndex = 0;
}
if ((System.currentTimeMillis() - testStart) > period.millis()) {
logger.info("test finished");
break;
}
}
for (int i = 0; i < indexerThreads.length; i++) {
indexerThreads[i].close = true;
}
Thread.sleep(indexerThrottle.millis() + 10000);
for (int i = 0; i < indexerThreads.length; i++) {
if (!indexerThreads[i].closed) {
logger.warn("thread not closed!");
}
}
client.client().admin().indices().prepareRefresh().execute().actionGet();
// check the count
for (int i = 0; i < (nodes.length * 5); i++) {
CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
logger.info("indexed [{}], count [{}], [{}]", count.getCount(), indexCounter.get(), count.getCount() == indexCounter.get() ? "OK" : "FAIL");
if (count.getCount() != indexCounter.get()) {
logger.warn("count does not match!");
}
}
// scan all the docs, verify all have the same version based on the number of replicas
SearchResponse searchResponse = client.client().prepareSearch()
.setSearchType(SearchType.SCAN)
.setQuery(matchAllQuery())
.setSize(50)
.setScroll(TimeValue.timeValueMinutes(2))
.execute().actionGet();
logger.info("Verifying versions for {} hits...", searchResponse.getHits().totalHits());
while (true) {
searchResponse = client.client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
logger.warn("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
}
for (SearchHit hit : searchResponse.getHits()) {
long version = -1;
for (int i = 0; i < (numberOfReplicas + 1); i++) {
GetResponse getResponse = client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
if (version == -1) {
version = getResponse.getVersion();
} else {
if (version != getResponse.getVersion()) {
logger.warn("Doc {} has different version numbers {} and {}", hit.id(), version, getResponse.getVersion());
}
}
}
}
if (searchResponse.getHits().hits().length == 0) {
break;
}
}
logger.info("Done verifying versions");
client.close();
for (Node node : nodes) {
node.close();
}
}
private class Indexer extends Thread {
volatile boolean close = false;
volatile boolean closed = false;
@Override
public void run() {
Random random = new Random(0);
while (true) {
if (close) {
closed = true;
return;
}
try {
indexDoc(random);
Thread.sleep(indexerThrottle.millis());
} catch (Exception e) {
logger.warn("failed to index / sleep", e);
}
}
}
}
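// Indexes one randomly generated document: a random "field" value plus up to numberOfFields
// numeric ("num_N") and text ("text_N") fields, using create=true and a sequential id so the
// final count can be compared against indexCounter.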
private void indexDoc(Random random) throws Exception {
StringBuilder sb = new StringBuilder();
XContentBuilder json = XContentFactory.jsonBuilder().startObject()
.field("field", "value" + ThreadLocalRandom.current().nextInt());
int fields = Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfFields;
for (int i = 0; i < fields; i++) {
json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
// use Math.abs so the token count cannot go negative (consistent with the fields calculation above)
int tokens = Math.abs(ThreadLocalRandom.current().nextInt()) % textTokens;
sb.setLength(0);
for (int j = 0; j < tokens; j++) {
sb.append(Strings.randomBase64UUID(random)).append(' ');
}
json.field("text_" + i, sb.toString());
}
json.endObject();
String id = Long.toString(idCounter.incrementAndGet());
client.client().prepareIndex("test", "type1", id)
.setCreate(true)
.setSource(json)
.execute().actionGet();
indexCounter.incrementAndGet();
}
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
Settings settings = settingsBuilder()
.put("index.shard.check_on_startup", true)
.put("path.data", "data/data1,data/data2")
.build();
RollingRestartStressTest test = new RollingRestartStressTest()
.settings(settings)
.numberOfNodes(4)
.numberOfShards(5)
.numberOfReplicas(1)
.initialNumberOfDocs(1000)
.textTokens(150)
.numberOfFields(10)
.cleanNodeData(false)
.indexers(5)
.indexerThrottle(TimeValue.timeValueMillis(50))
.period(TimeValue.timeValueMinutes(3));
test.run();
}
}

View File

@ -1,110 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.search1;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.search.SearchHit;
import org.junit.Ignore;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Tests that data doesn't get corrupted while being read over the streams.
* <p/>
* See: https://github.com/elasticsearch/elasticsearch/issues/1686.
*/
public class ConcurrentSearchSerializationBenchmark {
public static void main(String[] args) throws Exception {
Node node1 = NodeBuilder.nodeBuilder().node();
Node node2 = NodeBuilder.nodeBuilder().node();
Node node3 = NodeBuilder.nodeBuilder().node();
final Client client = node1.client();
System.out.println("Indexing...");
final String data = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), 100);
final CountDownLatch latch1 = new CountDownLatch(100);
for (int i = 0; i < 100; i++) {
client.prepareIndex("test", "type", Integer.toString(i))
.setSource("field", data)
.execute(new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
latch1.countDown();
}
@Override
public void onFailure(Throwable e) {
latch1.countDown();
}
});
}
latch1.await();
System.out.println("Indexed");
System.out.println("searching...");
Thread[] threads = new Thread[10];
final CountDownLatch latch = new CountDownLatch(threads.length);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < 1000; i++) {
SearchResponse searchResponse = client.prepareSearch("test")
.setQuery(QueryBuilders.matchAllQuery())
.setSize(i % 100)
.execute().actionGet();
for (SearchHit hit : searchResponse.getHits()) {
try {
if (!hit.sourceAsMap().get("field").equals(data)) {
System.err.println("Field not equal!");
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
latch.countDown();
}
});
}
for (Thread thread : threads) {
thread.start();
}
latch.await();
System.out.println("done searching");
client.close();
node1.close();
node2.close();
node3.close();
}
}

View File

@ -1,237 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.search1;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.transport.RemoteTransportException;
import java.io.IOException;
import java.util.*;
public class ParentChildStressTest {
private Node elasticNode;
private Client client;
private static final String PARENT_TYPE_NAME = "content";
private static final String CHILD_TYPE_NAME = "contentFiles";
private static final String INDEX_NAME = "acme";
/**
* Constructor. Initialize elastic and create the index/mapping
*/
public ParentChildStressTest() {
NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder();
Settings settings = nodeBuilder.settings()
.build();
this.elasticNode = nodeBuilder.settings(settings).client(true).node();
this.client = this.elasticNode.client();
String mapping =
"{\"contentFiles\": {" +
"\"_parent\": {" +
"\"type\" : \"content\"" +
"}}}";
try {
client.admin().indices().create(new CreateIndexRequest(INDEX_NAME).mapping(CHILD_TYPE_NAME, mapping)).actionGet();
} catch (RemoteTransportException e) {
// usually means the index is already created.
}
}
public void shutdown() throws IOException {
client.close();
elasticNode.close();
}
/**
* Deletes the item from both the parent and child type locations.
*/
public void deleteById(String id) {
client.prepareDelete(INDEX_NAME, PARENT_TYPE_NAME, id).execute().actionGet();
client.prepareDelete(INDEX_NAME, CHILD_TYPE_NAME, id).execute().actionGet();
}
/**
* Index a parent doc
*/
public void indexParent(String id, Map<String, Object> objectMap) throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
// index content
client.prepareIndex(INDEX_NAME, PARENT_TYPE_NAME, id).setSource(builder.map(objectMap)).execute().actionGet();
}
/**
* Index the file as a child doc
*/
public void indexChild(String id, Map<String, Object> objectMap) throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
IndexRequestBuilder indexRequestbuilder = client.prepareIndex(INDEX_NAME, CHILD_TYPE_NAME, id);
indexRequestbuilder = indexRequestbuilder.setParent(id);
indexRequestbuilder = indexRequestbuilder.setSource(builder.map(objectMap));
indexRequestbuilder.execute().actionGet();
}
/**
* Execute a search based on a JSON String in QueryDSL format.
* <p/>
* Throws a RuntimeException if there are any shard failures to
* elevate the visibility of the problem.
*/
public List<String> executeSearch(String source) {
SearchRequest request = Requests.searchRequest(INDEX_NAME).source(source);
List<ShardSearchFailure> failures;
SearchResponse response;
response = client.search(request).actionGet();
failures = Arrays.asList(response.getShardFailures());
// throw an exception so that we see the shard failures
if (failures.size() != 0) {
String failuresStr = failures.toString();
if (!failuresStr.contains("reason [No active shards]")) {
throw new RuntimeException(failures.toString());
}
}
ArrayList<String> results = new ArrayList<>();
if (response != null) {
for (SearchHit hit : response.getHits()) {
String sourceStr = hit.sourceAsString();
results.add(sourceStr);
}
}
return results;
}
/**
* Create a document as a parent and index it.
* Load a file and index it as a child.
*/
public String indexDoc() throws IOException {
String id = UUID.randomUUID().toString();
Map<String, Object> objectMap = new HashMap<>();
objectMap.put("title", "this is a document");
Map<String, Object> objectMap2 = new HashMap<>();
objectMap2.put("description", "child test");
this.indexParent(id, objectMap);
this.indexChild(id, objectMap2);
return id;
}
/**
* Perform the has_child query for the doc.
* <p/>
* Since it might take time to get indexed, it
* loops until it finds the doc.
*/
public void searchDocByChild() throws InterruptedException {
String dslString =
"{\"query\":{" +
"\"has_child\":{" +
"\"query\":{" +
"\"field\":{" +
"\"description\":\"child test\"}}," +
"\"type\":\"contentFiles\"}}}";
int numTries = 0;
List<String> items = new ArrayList<>();
while (items.size() != 1 && numTries < 20) {
items = executeSearch(dslString);
numTries++;
if (items.size() != 1) {
Thread.sleep(250);
}
}
if (items.size() != 1) {
System.out.println("Exceeded number of retries");
System.exit(1);
}
}
/**
* Program to loop on:
* create parent/child doc
* search for the doc
* delete the doc
* repeat the above until shard failure.
* <p/>
* Eventually fails with:
* <p/>
* [shard [[74wz0lrXRSmSOsJOqgPvlw][acme][1]], reason [RemoteTransportException
* [[Kismet][inet[/10.10.30.52:9300]][search/phase/query]]; nested:
* QueryPhaseExecutionException[[acme][1]:
* query[ConstantScore(child_filter[contentFiles
* /content](filtered(file:mission
* file:statement)->FilterCacheFilterWrapper(
* _type:contentFiles)))],from[0],size[10]: Query Failed [Failed to execute
* child query [filtered(file:mission
* file:statement)->FilterCacheFilterWrapper(_type:contentFiles)]]]; nested:
* ]]
*
* @param args
*/
public static void main(String[] args) throws IOException {
ParentChildStressTest elasticTest = new ParentChildStressTest();
try {
// loop a bunch of times - usually fails before the loop finishes.
int NUM_LOOPS = 1000;
System.out.println();
System.out.println("Looping [" + NUM_LOOPS + "] times:");
System.out.println();
for (int i = 0; i < NUM_LOOPS; i++) {
String id = elasticTest.indexDoc();
elasticTest.searchDocByChild();
elasticTest.deleteById(id);
System.out.println(" Success: " + i);
}
elasticTest.shutdown();
} catch (Exception e) {
e.printStackTrace();
} finally {
elasticTest.shutdown();
}
}
}

View File

@ -1,374 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.stresstest.search1;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortOrder;
import org.junit.Ignore;
import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
public class Search1StressBenchmark {
private final ESLogger logger = Loggers.getLogger(getClass());
private int numberOfNodes = 4;
private int indexers = 0;
private SizeValue preIndexDocs = new SizeValue(0);
private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
private int searchers = 0;
private TimeValue searcherThrottle = TimeValue.timeValueMillis(20);
private int numberOfIndices = 10;
private int numberOfTypes = 4;
private int numberOfValues = 20;
private int numberOfHits = 300;
private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000);
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private TimeValue period = TimeValue.timeValueMinutes(20);
private AtomicLong indexCounter = new AtomicLong();
private AtomicLong searchCounter = new AtomicLong();
private Node client;
public Search1StressBenchmark setNumberOfNodes(int numberOfNodes) {
this.numberOfNodes = numberOfNodes;
return this;
}
public Search1StressBenchmark setPreIndexDocs(SizeValue preIndexDocs) {
this.preIndexDocs = preIndexDocs;
return this;
}
public Search1StressBenchmark setIndexers(int indexers) {
this.indexers = indexers;
return this;
}
public Search1StressBenchmark setIndexerThrottle(TimeValue indexerThrottle) {
this.indexerThrottle = indexerThrottle;
return this;
}
public Search1StressBenchmark setSearchers(int searchers) {
this.searchers = searchers;
return this;
}
public Search1StressBenchmark setSearcherThrottle(TimeValue searcherThrottle) {
this.searcherThrottle = searcherThrottle;
return this;
}
public Search1StressBenchmark setNumberOfIndices(int numberOfIndices) {
this.numberOfIndices = numberOfIndices;
return this;
}
public Search1StressBenchmark setNumberOfTypes(int numberOfTypes) {
this.numberOfTypes = numberOfTypes;
return this;
}
public Search1StressBenchmark setNumberOfValues(int numberOfValues) {
this.numberOfValues = numberOfValues;
return this;
}
public Search1StressBenchmark setNumberOfHits(int numberOfHits) {
this.numberOfHits = numberOfHits;
return this;
}
public Search1StressBenchmark setFlusherThrottle(TimeValue flusherThrottle) {
this.flusherThrottle = flusherThrottle;
return this;
}
public Search1StressBenchmark setSettings(Settings settings) {
this.settings = settings;
return this;
}
public Search1StressBenchmark setPeriod(TimeValue period) {
this.period = period;
return this;
}
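// Note: Math.abs(Integer.MIN_VALUE) is still negative, so the helpers below can in principle
// produce a negative suffix; vanishingly unlikely and harmless for a stress run, but worth noting.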
private String nextIndex() {
return "test" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfIndices;
}
private String nextType() {
return "type" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfTypes;
}
private int nextNumValue() {
return Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfValues;
}
private String nextFieldValue() {
return "value" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfValues;
}
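// Searches a random index with a random sort / search type / size / from combination and a term
// query on "field", then sanity-checks that every hit comes from the requested index and carries
// the queried value.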
private class Searcher extends Thread {
volatile boolean close = false;
volatile boolean closed = false;
@Override
public void run() {
while (true) {
if (close) {
closed = true;
return;
}
try {
String indexName = nextIndex();
SearchRequestBuilder builder = client.client().prepareSearch(indexName);
if (ThreadLocalRandom.current().nextBoolean()) {
builder.addSort("num", SortOrder.DESC);
} else if (ThreadLocalRandom.current().nextBoolean()) {
// add a _score based sort; this matches the default ordering anyway, it is only here to exercise the sort path
builder.addSort("_score", SortOrder.DESC);
}
if (ThreadLocalRandom.current().nextBoolean()) {
builder.setSearchType(SearchType.DFS_QUERY_THEN_FETCH);
}
int size = Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfHits;
builder.setSize(size);
if (ThreadLocalRandom.current().nextBoolean()) {
// update from
builder.setFrom(size / 2);
}
String value = nextFieldValue();
builder.setQuery(termQuery("field", value));
searchCounter.incrementAndGet();
SearchResponse searchResponse = builder.execute().actionGet();
if (searchResponse.getFailedShards() > 0) {
logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
}
// verify that all hits come from the requested index
for (SearchHit hit : searchResponse.getHits()) {
if (!hit.shard().index().equals(indexName)) {
logger.warn("got wrong index, asked for [{}], got [{}]", indexName, hit.shard().index());
}
}
// verify that all hits have the relevant field value
for (SearchHit hit : searchResponse.getHits()) {
if (!value.equals(hit.sourceAsMap().get("field"))) {
logger.warn("got wrong field, asked for [{}], got [{}]", value, hit.sourceAsMap().get("field"));
}
}
Thread.sleep(searcherThrottle.millis());
} catch (Exception e) {
logger.warn("failed to search", e);
}
}
}
}
private class Indexer extends Thread {
volatile boolean close = false;
volatile boolean closed = false;
@Override
public void run() {
while (true) {
if (close) {
closed = true;
return;
}
try {
indexDoc();
Thread.sleep(indexerThrottle.millis());
} catch (Exception e) {
logger.warn("failed to index / sleep", e);
}
}
}
}
private class Flusher extends Thread {
volatile boolean close = false;
volatile boolean closed = false;
@Override
public void run() {
while (true) {
if (close) {
closed = true;
return;
}
try {
client.client().admin().indices().prepareFlush().execute().actionGet();
Thread.sleep(indexerThrottle.millis());
} catch (Exception e) {
logger.warn("failed to flush / sleep", e);
}
}
}
}
private void indexDoc() throws Exception {
XContentBuilder json = XContentFactory.jsonBuilder().startObject()
.field("num", nextNumValue())
.field("field", nextFieldValue());
json.endObject();
client.client().prepareIndex(nextIndex(), nextType())
.setSource(json)
.execute().actionGet();
indexCounter.incrementAndGet();
}
public void run() throws Exception {
Node[] nodes = new Node[numberOfNodes];
for (int i = 0; i < nodes.length; i++) {
nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
}
client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
for (int i = 0; i < numberOfIndices; i++) {
client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
}
logger.info("Pre indexing docs [{}]...", preIndexDocs);
for (long i = 0; i < preIndexDocs.singles(); i++) {
indexDoc();
}
logger.info("Done pre indexing docs [{}]", preIndexDocs);
Indexer[] indexerThreads = new Indexer[indexers];
for (int i = 0; i < indexerThreads.length; i++) {
indexerThreads[i] = new Indexer();
}
for (Indexer indexerThread : indexerThreads) {
indexerThread.start();
}
Thread.sleep(10000);
Searcher[] searcherThreads = new Searcher[searchers];
for (int i = 0; i < searcherThreads.length; i++) {
searcherThreads[i] = new Searcher();
}
for (Searcher searcherThread : searcherThreads) {
searcherThread.start();
}
Flusher flusher = null;
if (flusherThrottle.millis() > 0) {
flusher = new Flusher();
flusher.start();
}
long testStart = System.currentTimeMillis();
while (true) {
Thread.sleep(5000);
if ((System.currentTimeMillis() - testStart) > period.millis()) {
break;
}
}
System.out.println("DONE, closing .....");
if (flusher != null) {
flusher.close = true;
}
for (Searcher searcherThread : searcherThreads) {
searcherThread.close = true;
}
for (Indexer indexerThread : indexerThreads) {
indexerThread.close = true;
}
Thread.sleep(indexerThrottle.millis() + 10000);
if (flusher != null && !flusher.closed) {
logger.warn("flusher not closed!");
}
for (Searcher searcherThread : searcherThreads) {
if (!searcherThread.closed) {
logger.warn("search thread not closed!");
}
}
for (Indexer indexerThread : indexerThreads) {
if (!indexerThread.closed) {
logger.warn("index thread not closed!");
}
}
client.close();
for (Node node : nodes) {
node.close();
}
System.out.println("********** DONE, indexed [" + indexCounter.get() + "], searched [" + searchCounter.get() + "]");
}
public static void main(String[] args) throws Exception {
Search1StressBenchmark test = new Search1StressBenchmark()
.setPeriod(TimeValue.timeValueMinutes(10))
.setNumberOfNodes(2)
.setPreIndexDocs(SizeValue.parseSizeValue("100"))
.setIndexers(2)
.setIndexerThrottle(TimeValue.timeValueMillis(100))
.setSearchers(10)
.setSearcherThrottle(TimeValue.timeValueMillis(10))
.setFlusherThrottle(TimeValue.timeValueMillis(1000))
.setNumberOfIndices(10)
.setNumberOfTypes(5)
.setNumberOfValues(50)
.setNumberOfHits(300);
test.run();
}
}

View File

@ -720,7 +720,6 @@
</includes>
<excludes>
<exclude>**/*$*.class</exclude>
<exclude>**/*StressTest.class</exclude>
</excludes>
</configuration>
</execution>