Extract the snapshot/restore full cluster restart tests from the translog full cluster restart tests (#25204)

Extract the snapshot/restore full cluster restart tests from the translog full cluster restart tests. That way they are easier to read.
Nik Everett 2017-06-14 13:03:59 -04:00 committed by GitHub
parent aa3134c093
commit ce11b894b4
2 changed files with 212 additions and 152 deletions

FullClusterRestartIT.java

@ -19,7 +19,6 @@
 package org.elasticsearch.upgrades;

-import org.apache.http.ParseException;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.util.EntityUtils;
@ -32,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.Before;

 import java.io.IOException;
 import java.util.Collections;
@ -42,6 +42,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
 import static java.util.Collections.singletonMap;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
@ -54,24 +55,34 @@ import static org.hamcrest.Matchers.containsString;
  * with {@code tests.is_old_cluster} set to {@code false}.
  */
 public class FullClusterRestartIT extends ESRestTestCase {
-    private static final String REPO = "/_snapshot/repo";
-
     private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
     private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
     private final boolean supportsLenientBooleans = oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1);

+    private String index;
+
+    @Before
+    public void setIndex() {
+        index = getTestName().toLowerCase(Locale.ROOT);
+    }
+
     @Override
     protected boolean preserveIndicesUponCompletion() {
         return true;
     }

+    @Override
+    protected boolean preserveSnapshotsUponCompletion() {
+        return true;
+    }
+
     @Override
     protected boolean preserveReposUponCompletion() {
         return true;
     }

     public void testSearch() throws Exception {
-        String index = getTestName().toLowerCase(Locale.ROOT);
+        int count;
         if (runningAgainstOldCluster) {
             XContentBuilder mappingsAndSettings = jsonBuilder();
             mappingsAndSettings.startObject();
@ -103,8 +114,8 @@ public class FullClusterRestartIT extends ESRestTestCase {
client().performRequest("PUT", "/" + index, Collections.emptyMap(), client().performRequest("PUT", "/" + index, Collections.emptyMap(),
new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON)); new StringEntity(mappingsAndSettings.string(), ContentType.APPLICATION_JSON));
int numDocs = randomIntBetween(2000, 3000); count = randomIntBetween(2000, 3000);
indexRandomDocuments(index, numDocs, true, i -> { indexRandomDocuments(count, true, true, i -> {
return JsonXContent.contentBuilder().startObject() return JsonXContent.contentBuilder().startObject()
.field("string", randomAlphaOfLength(10)) .field("string", randomAlphaOfLength(10))
.field("int", randomInt(100)) .field("int", randomInt(100))
@ -115,45 +126,51 @@ public class FullClusterRestartIT extends ESRestTestCase {
                     // TODO a binary field
                     .endObject();
             });
-            logger.info("Refreshing [{}]", index);
-            client().performRequest("POST", "/" + index + "/_refresh");
+            refresh();
+        } else {
+            count = countOfIndexedRandomDocuments();
         }
-        assertBasicSearchWorks(index);
+        assertBasicSearchWorks(count);
     }

-    void assertBasicSearchWorks(String index) throws IOException {
+    void assertBasicSearchWorks(int count) throws IOException {
         logger.info("--> testing basic search");
         Map<String, Object> response = toMap(client().performRequest("GET", "/" + index + "/_search"));
         assertNoFailures(response);
-        int numDocs1 = (int) XContentMapValues.extractValue("hits.total", response);
-        logger.info("Found {} in old index", numDocs1);
+        int numDocs = (int) XContentMapValues.extractValue("hits.total", response);
+        logger.info("Found {} in old index", numDocs);
+        assertEquals(count, numDocs);

         logger.info("--> testing basic search with sort");
         String searchRequestBody = "{ \"sort\": [{ \"int\" : \"asc\" }]}";
         response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(),
             new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON)));
         assertNoFailures(response);
-        int numDocs2 = (int) XContentMapValues.extractValue("hits.total", response);
-        assertEquals(numDocs1, numDocs2);
+        numDocs = (int) XContentMapValues.extractValue("hits.total", response);
+        assertEquals(count, numDocs);

         logger.info("--> testing exists filter");
         searchRequestBody = "{ \"query\": { \"exists\" : {\"field\": \"string\"} }}";
         response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(),
             new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON)));
         assertNoFailures(response);
-        numDocs2 = (int) XContentMapValues.extractValue("hits.total", response);
-        assertEquals(numDocs1, numDocs2);
+        numDocs = (int) XContentMapValues.extractValue("hits.total", response);
+        assertEquals(count, numDocs);

         searchRequestBody = "{ \"query\": { \"exists\" : {\"field\": \"field.with.dots\"} }}";
         response = toMap(client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(),
             new StringEntity(searchRequestBody, ContentType.APPLICATION_JSON)));
         assertNoFailures(response);
-        numDocs2 = (int) XContentMapValues.extractValue("hits.total", response);
-        assertEquals(numDocs1, numDocs2);
+        numDocs = (int) XContentMapValues.extractValue("hits.total", response);
+        assertEquals(count, numDocs);
     }

     static Map<String, Object> toMap(Response response) throws IOException {
-        return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
+        return toMap(EntityUtils.toString(response.getEntity()));
+    }
+
+    static Map<String, Object> toMap(String response) throws IOException {
+        return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false);
     }

     static void assertNoFailures(Map<String, Object> response) {
@ -165,7 +182,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
      * Tests that a single document survives. Super basic smoke test.
      */
     public void testSingleDoc() throws IOException {
-        String docLocation = "/" + getTestName().toLowerCase(Locale.ROOT) + "/doc/1";
+        String docLocation = "/" + index + "/doc/1";
         String doc = "{\"test\": \"test\"}";

         if (runningAgainstOldCluster) {
@ -176,11 +193,11 @@ public class FullClusterRestartIT extends ESRestTestCase {
         assertThat(EntityUtils.toString(client().performRequest("GET", docLocation).getEntity()), containsString(doc));
     }

-    public void testRandomDocumentsAndSnapshot() throws IOException {
-        String testName = getTestName().toLowerCase(Locale.ROOT);
-        String index = testName + "_data";
-        String infoDocument = "/" + testName + "_info/doc/info";
-
+    /**
+     * Tests recovery of an index with or without a translog and the
+     * statistics we gather about that.
+     */
+    public void testRecovery() throws IOException {
         int count;
         boolean shouldHaveTranslog;
         if (runningAgainstOldCluster) {
@ -189,34 +206,19 @@ public class FullClusterRestartIT extends ESRestTestCase {
              * an index without a translog so we randomize whether
              * or not we have one. */
             shouldHaveTranslog = randomBoolean();
-            logger.info("Creating {} documents", count);
-            indexRandomDocuments(index, count, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
-            createSnapshot();
+            indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
             // Explicitly flush so we're sure to have a bunch of documents in the Lucene index
             client().performRequest("POST", "/_flush");
             if (shouldHaveTranslog) {
                 // Update a few documents so we are sure to have a translog
-                indexRandomDocuments(index, count / 10, false /* Flushing here would invalidate the whole thing....*/,
+                indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false,
                     i -> jsonBuilder().startObject().field("field", "value").endObject());
             }
-
-            // Record how many documents we built so we can compare later
-            XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject();
-            infoDoc.field("count", count);
-            infoDoc.field("should_have_translog", shouldHaveTranslog);
-            infoDoc.endObject();
-            client().performRequest("PUT", infoDocument, singletonMap("refresh", "true"),
-                new StringEntity(infoDoc.string(), ContentType.APPLICATION_JSON));
+            saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog));
         } else {
-            // Load the number of documents that were written to the old cluster
-            String doc = EntityUtils.toString(
-                client().performRequest("GET", infoDocument, singletonMap("filter_path", "_source")).getEntity());
-            Matcher m = Pattern.compile("\"count\":(\\d+)").matcher(doc);
-            assertTrue(doc, m.find());
-            count = Integer.parseInt(m.group(1));
-            m = Pattern.compile("\"should_have_translog\":(true|false)").matcher(doc);
-            assertTrue(doc, m.find());
-            shouldHaveTranslog = Booleans.parseBoolean(m.group(1));
+            count = countOfIndexedRandomDocuments();
+            shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument("should_have_translog"));
         }

         // Count the documents in the index to make sure we have as many as we put there
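Note on the change above: the removed hand-rolled info-document code is replaced by saveInfoDocument and loadInfoDocument, added near the bottom of this file. They persist each value as a one-field document in a shared "info" index, keyed by the test index name and the property name, and use op_type=create so the value written by the old-cluster run is never overwritten. For this test the stored document would look roughly like the following; the index name and value are only illustrative:

    PUT /info/doc/testrecovery_should_have_translog?op_type=create
    {"value": "true"}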
@ -225,106 +227,100 @@ public class FullClusterRestartIT extends ESRestTestCase {
         assertThat(countResponse, containsString("\"total\":" + count));

         if (false == runningAgainstOldCluster) {
-            assertTranslogRecoveryStatistics(index, shouldHaveTranslog);
-        }
-
-        restoreSnapshot(index, count);
-
-        // TODO finish adding tests for the things in OldIndexBackwardsCompatibilityIT
-    }
-
-    // TODO tests for upgrades after shrink. We've had trouble with shrink in the past.
-
-    private void indexRandomDocuments(String index, int count, boolean flushAllowed,
-            CheckedFunction<Integer, XContentBuilder, IOException> docSupplier) throws IOException {
-        for (int i = 0; i < count; i++) {
-            logger.debug("Indexing document [{}]", i);
-            client().performRequest("POST", "/" + index + "/doc/" + i, emptyMap(),
-                new StringEntity(docSupplier.apply(i).string(), ContentType.APPLICATION_JSON));
-            if (rarely()) {
-                logger.info("Refreshing [{}]", index);
-                client().performRequest("POST", "/" + index + "/_refresh");
-            }
-            if (flushAllowed && rarely()) {
-                logger.info("Flushing [{}]", index);
-                client().performRequest("POST", "/" + index + "/_flush");
-            }
-        }
-    }
-
-    private void createSnapshot() throws IOException {
-        XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); {
-            repoConfig.field("type", "fs");
-            repoConfig.startObject("settings"); {
-                repoConfig.field("compress", randomBoolean());
-                repoConfig.field("location", System.getProperty("tests.path.repo"));
-            }
-            repoConfig.endObject();
-        }
-        repoConfig.endObject();
-        client().performRequest("PUT", REPO, emptyMap(), new StringEntity(repoConfig.string(), ContentType.APPLICATION_JSON));
-
-        client().performRequest("PUT", REPO + "/snap", singletonMap("wait_for_completion", "true"));
-    }
-
-    private void assertTranslogRecoveryStatistics(String index, boolean shouldHaveTranslog) throws ParseException, IOException {
-        boolean restoredFromTranslog = false;
-        boolean foundPrimary = false;
-        Map<String, String> params = new HashMap<>();
-        params.put("h", "index,shard,type,stage,translog_ops_recovered");
-        params.put("s", "index,shard,type");
-        String recoveryResponse = EntityUtils.toString(client().performRequest("GET", "/_cat/recovery/" + index, params).getEntity());
-        for (String line : recoveryResponse.split("\n")) {
-            // Find the primaries
-            foundPrimary = true;
-            if (false == line.contains("done") && line.contains("existing_store")) {
-                continue;
-            }
-            /* Mark if we see a primary that looked like it restored from the translog.
-             * Not all primaries will look like this all the time because we modify
-             * random documents when we want there to be a translog and they might
-             * not be spread around all the shards. */
-            Matcher m = Pattern.compile("(\\d+)$").matcher(line);
-            assertTrue(line, m.find());
-            int translogOps = Integer.parseInt(m.group(1));
-            if (translogOps > 0) {
-                restoredFromTranslog = true;
-            }
-        }
-        assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary);
-        assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog);
-
-        String currentLuceneVersion = Version.CURRENT.luceneVersion.toString();
-        String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString();
-        if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) {
-            int numCurrentVersion = 0;
-            int numBwcVersion = 0;
-            params.clear();
-            params.put("h", "prirep,shard,index,version");
-            params.put("s", "prirep,shard,index");
-            String segmentsResponse = EntityUtils.toString(
-                client().performRequest("GET", "/_cat/segments/" + index, params).getEntity());
-            for (String line : segmentsResponse.split("\n")) {
-                if (false == line.startsWith("p")) {
-                    continue;
-                }
-                Matcher m = Pattern.compile("(\\d+\\.\\d+\\.\\d+)$").matcher(line);
-                assertTrue(line, m.find());
-                String version = m.group(1);
-                if (currentLuceneVersion.equals(version)) {
-                    numCurrentVersion++;
-                } else if (bwcLuceneVersion.equals(version)) {
-                    numBwcVersion++;
-                } else {
-                    fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line);
-                }
-            }
-            assertNotEquals("expected at least 1 current segment after translog recovery", 0, numCurrentVersion);
-            assertNotEquals("expected at least 1 old segment", 0, numBwcVersion);
-        }
-    }
-
-    private void restoreSnapshot(String index, int count) throws ParseException, IOException {
+            boolean restoredFromTranslog = false;
+            boolean foundPrimary = false;
+            Map<String, String> params = new HashMap<>();
+            params.put("h", "index,shard,type,stage,translog_ops_recovered");
+            params.put("s", "index,shard,type");
+            String recoveryResponse = EntityUtils.toString(client().performRequest("GET", "/_cat/recovery/" + index, params).getEntity());
+            for (String line : recoveryResponse.split("\n")) {
+                // Find the primaries
+                foundPrimary = true;
+                if (false == line.contains("done") && line.contains("existing_store")) {
+                    continue;
+                }
+                /* Mark if we see a primary that looked like it restored from the translog.
+                 * Not all primaries will look like this all the time because we modify
+                 * random documents when we want there to be a translog and they might
+                 * not be spread around all the shards. */
+                Matcher m = Pattern.compile("(\\d+)$").matcher(line);
+                assertTrue(line, m.find());
+                int translogOps = Integer.parseInt(m.group(1));
+                if (translogOps > 0) {
+                    restoredFromTranslog = true;
+                }
+            }
+            assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary);
+            assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog);
+
+            String currentLuceneVersion = Version.CURRENT.luceneVersion.toString();
+            String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString();
+            if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) {
+                int numCurrentVersion = 0;
+                int numBwcVersion = 0;
+                params.clear();
+                params.put("h", "prirep,shard,index,version");
+                params.put("s", "prirep,shard,index");
+                String segmentsResponse = EntityUtils.toString(
+                    client().performRequest("GET", "/_cat/segments/" + index, params).getEntity());
+                for (String line : segmentsResponse.split("\n")) {
+                    if (false == line.startsWith("p")) {
+                        continue;
+                    }
+                    Matcher m = Pattern.compile("(\\d+\\.\\d+\\.\\d+)$").matcher(line);
+                    assertTrue(line, m.find());
+                    String version = m.group(1);
+                    if (currentLuceneVersion.equals(version)) {
+                        numCurrentVersion++;
+                    } else if (bwcLuceneVersion.equals(version)) {
+                        numBwcVersion++;
+                    } else {
+                        fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line);
+                    }
+                }
+                assertNotEquals("expected at least 1 current segment after translog recovery", 0, numCurrentVersion);
+                assertNotEquals("expected at least 1 old segment", 0, numBwcVersion);
+            }
+        }
+    }
+
+    public void testSnapshotRestore() throws IOException {
+        int count;
+        if (runningAgainstOldCluster) {
+            count = between(200, 300);
+            indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
+
+            // Create the repo and the snapshot
+            XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); {
+                repoConfig.field("type", "fs");
+                repoConfig.startObject("settings"); {
+                    repoConfig.field("compress", randomBoolean());
+                    repoConfig.field("location", System.getProperty("tests.path.repo"));
+                }
+                repoConfig.endObject();
+            }
+            repoConfig.endObject();
+            client().performRequest("PUT", "/_snapshot/repo", emptyMap(),
+                new StringEntity(repoConfig.string(), ContentType.APPLICATION_JSON));
+
+            XContentBuilder snapshotConfig = JsonXContent.contentBuilder().startObject(); {
+                snapshotConfig.field("indices", index);
+            }
+            snapshotConfig.endObject();
+            client().performRequest("PUT", "/_snapshot/repo/snap", singletonMap("wait_for_completion", "true"),
+                new StringEntity(snapshotConfig.string(), ContentType.APPLICATION_JSON));
+
+            // Refresh the index so the count doesn't fail
+            refresh();
+        } else {
+            count = countOfIndexedRandomDocuments();
+        }
+
+        // Count the documents in the index to make sure we have as many as we put there
+        String countResponse = EntityUtils.toString(
+            client().performRequest("GET", "/" + index + "/_search", singletonMap("size", "0")).getEntity());
+        assertThat(countResponse, containsString("\"total\":" + count));
+
         if (false == runningAgainstOldCluster) {
             /* Remove any "restored" indices from the old cluster run of this test.
              * We intentionally don't remove them while running this against the
@ -333,25 +329,79 @@ public class FullClusterRestartIT extends ESRestTestCase {
client().performRequest("DELETE", "/restored_*"); client().performRequest("DELETE", "/restored_*");
} }
if (runningAgainstOldCluster) { // Check the metadata, especially the version
// TODO restoring the snapshot seems to fail! This seems like a bug. String response = EntityUtils.toString(
XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); client().performRequest("GET", "/_snapshot/repo/_all", singletonMap("verbose", "true")).getEntity());
restoreCommand.field("include_global_state", false); Map<String, Object> map = toMap(response);
restoreCommand.field("indices", index); assertEquals(response, singletonList("snap"), XContentMapValues.extractValue("snapshots.snapshot", map));
restoreCommand.field("rename_pattern", index); assertEquals(response, singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", map));
restoreCommand.field("rename_replacement", "restored_" + index); assertEquals(response, singletonList(oldClusterVersion.toString()), XContentMapValues.extractValue("snapshots.version", map));
restoreCommand.endObject();
client().performRequest("POST", REPO + "/snap/_restore", singletonMap("wait_for_completion", "true"),
new StringEntity(restoreCommand.string(), ContentType.APPLICATION_JSON));
String countResponse = EntityUtils.toString( XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject();
client().performRequest("GET", "/restored_" + index + "/_search", singletonMap("size", "0")).getEntity()); restoreCommand.field("include_global_state", randomBoolean());
assertThat(countResponse, containsString("\"total\":" + count)); restoreCommand.field("indices", index);
restoreCommand.field("rename_pattern", index);
restoreCommand.field("rename_replacement", "restored_" + index);
restoreCommand.endObject();
client().performRequest("POST", "/_snapshot/repo/snap/_restore", singletonMap("wait_for_completion", "true"),
new StringEntity(restoreCommand.string(), ContentType.APPLICATION_JSON));
countResponse = EntityUtils.toString(
client().performRequest("GET", "/restored_" + index + "/_search", singletonMap("size", "0")).getEntity());
assertThat(countResponse, containsString("\"total\":" + count));
}
// TODO tests for upgrades after shrink. We've had trouble with shrink in the past.
private void indexRandomDocuments(int count, boolean flushAllowed, boolean saveInfo,
CheckedFunction<Integer, XContentBuilder, IOException> docSupplier) throws IOException {
logger.info("Indexing {} random documents", count);
for (int i = 0; i < count; i++) {
logger.debug("Indexing document [{}]", i);
client().performRequest("POST", "/" + index + "/doc/" + i, emptyMap(),
new StringEntity(docSupplier.apply(i).string(), ContentType.APPLICATION_JSON));
if (rarely()) {
refresh();
}
if (flushAllowed && rarely()) {
logger.debug("Flushing [{}]", index);
client().performRequest("POST", "/" + index + "/_flush");
}
} }
if (saveInfo) {
saveInfoDocument("count", Integer.toString(count));
}
}
private int countOfIndexedRandomDocuments() throws IOException {
return Integer.parseInt(loadInfoDocument("count"));
}
private void saveInfoDocument(String type, String value) throws IOException {
XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject();
infoDoc.field("value", value);
infoDoc.endObject();
// Only create the first version so we know how many documents are created when the index is first created
Map<String, String> params = singletonMap("op_type", "create");
client().performRequest("PUT", "/info/doc/" + index + "_" + type, params,
new StringEntity(infoDoc.string(), ContentType.APPLICATION_JSON));
}
private String loadInfoDocument(String type) throws IOException {
String doc = EntityUtils.toString(
client().performRequest("GET", "/info/doc/" + index + "_" + type, singletonMap("filter_path", "_source")).getEntity());
Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc);
assertTrue(doc, m.find());
return m.group(1);
} }
private Object randomLenientBoolean() { private Object randomLenientBoolean() {
return randomFrom(new Object[] {"off", "no", "0", 0, "false", false, "on", "yes", "1", 1, "true", true}); return randomFrom(new Object[] {"off", "no", "0", 0, "false", false, "on", "yes", "1", 1, "true", true});
} }
private void refresh() throws IOException {
logger.debug("Refreshing [{}]", index);
client().performRequest("POST", "/" + index + "/_refresh");
}
} }
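For readers less familiar with the snapshot API, the new testSnapshotRestore amounts to roughly the following REST calls. This is only a sketch of what the XContentBuilder code above produces; the randomized compress and include_global_state flags and the repository location (taken from the tests.path.repo system property) are shown as placeholders:

    PUT /_snapshot/repo
    {"type": "fs", "settings": {"compress": <random boolean>, "location": "<tests.path.repo>"}}

    PUT /_snapshot/repo/snap?wait_for_completion=true
    {"indices": "<test index>"}

    POST /_snapshot/repo/snap/_restore?wait_for_completion=true
    {"include_global_state": <random boolean>, "indices": "<test index>",
     "rename_pattern": "<test index>", "rename_replacement": "restored_<test index>"}

On the new-cluster side the test also reads GET /_snapshot/repo/_all?verbose=true to verify the snapshot name, state, and the old cluster's version before restoring, and finally compares the hit count of restored_<test index> against the count recorded in the info document.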

ESRestTestCase.java

@ -181,11 +181,21 @@ public abstract class ESRestTestCase extends ESTestCase {
     /**
      * Returns whether to preserve the repositories on completion of this test.
+     * Defaults to not preserving repos. See also
+     * {@link #preserveSnapshotsUponCompletion()}.
      */
     protected boolean preserveReposUponCompletion() {
         return false;
     }

+    /**
+     * Returns whether to preserve the snapshots in repositories on completion of this
+     * test. Defaults to not preserving snapshots. Only works for {@code fs} repositories.
+     */
+    protected boolean preserveSnapshotsUponCompletion() {
+        return false;
+    }
+
     private void wipeCluster() throws IOException {
         if (preserveIndicesUponCompletion() == false) {
             // wipe indices
@ -217,7 +227,7 @@ public abstract class ESRestTestCase extends ESTestCase {
             String repoName = repo.getKey();
             Map<?, ?> repoSpec = (Map<?, ?>) repo.getValue();
             String repoType = (String) repoSpec.get("type");
-            if (repoType.equals("fs")) {
+            if (false == preserveSnapshotsUponCompletion() && repoType.equals("fs")) {
                 // All other repo types we really don't have a chance of being able to iterate properly, sadly.
                 String url = "_snapshot/" + repoName + "/_all";
                 Map<String, String> params = singletonMap("ignore_unavailable", "true");