Tests: Wipe cluster settings after every test (#28410)

Cluster settings shouldn't leak into the next test.

I played with failing the test if it left any settings behind, but that
felt like it added more ceremony than it was worth. The advantage would be
that any test that intentionally wants to leave settings in place after
it finishes would fail and require a closer look, but, so far as I can tell,
we don't have any such tests.
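
For comparison, the rejected alternative (failing any test that leaves cluster settings behind) might have looked roughly like the sketch below. It is not part of this commit: the method name is made up, and it assumes it is declared inside ESRestTestCase next to the helpers used in the diff below (adminClient() and entityAsMap()), with Hamcrest's empty() matcher statically imported.

[source,java]
--------------------------------------------------
/**
 * Hypothetical check, not part of this commit: fail the test if it left
 * any cluster settings behind instead of wiping them. Assumes it lives in
 * ESRestTestCase so adminClient() and entityAsMap() are available.
 */
private void assertNoLeakedClusterSettings() throws IOException {
    Map<?, ?> getResponse = entityAsMap(adminClient().performRequest("GET", "/_cluster/settings"));
    for (Map.Entry<?, ?> entry : getResponse.entrySet()) {
        // The response has one entry per settings type, e.g. "persistent" and "transient".
        Map<?, ?> settings = (Map<?, ?>) entry.getValue();
        assertThat("test leaked " + entry.getKey() + " cluster settings",
                settings.keySet(), empty());
    }
}
--------------------------------------------------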
Nik Everett 2018-01-29 11:47:04 -05:00 committed by GitHub
parent 6f84503c33
commit 66ff1b2a59
3 changed files with 35 additions and 19 deletions


@@ -65,4 +65,3 @@ PUT _cluster/settings
}
------------------------
// CONSOLE
// TEST[skip:indexes don't assign]


@@ -97,21 +97,3 @@ PUT _cluster/settings
}
--------------------------------------------------
// CONSOLE
/////////////////////
Clear the settings so they don't leak into subsequent snippets.
[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "transient": {
    "cluster.*" : null
  }
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
/////////////////////


@@ -37,8 +37,10 @@ import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.junit.AfterClass;

@@ -63,10 +65,12 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static java.util.Collections.sort;
import static java.util.Collections.unmodifiableList;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;

/**
@@ -222,6 +226,7 @@ public abstract class ESRestTestCase extends ESTestCase {
        }
        wipeSnapshots();
        wipeClusterSettings();
    }

    /**

@@ -253,6 +258,36 @@ public abstract class ESRestTestCase extends ESTestCase {
        }
    }
    /**
     * Remove any cluster settings.
     */
    private void wipeClusterSettings() throws IOException {
        Map<?, ?> getResponse = entityAsMap(adminClient().performRequest("GET", "/_cluster/settings"));
        boolean mustClear = false;
        XContentBuilder clearCommand = JsonXContent.contentBuilder();
        clearCommand.startObject();
        for (Map.Entry<?, ?> entry : getResponse.entrySet()) {
            String type = entry.getKey().toString();
            Map<?, ?> settings = (Map<?, ?>) entry.getValue();
            if (settings.isEmpty()) {
                continue;
            }
            mustClear = true;
            clearCommand.startObject(type);
            for (Object key: settings.keySet()) {
                clearCommand.field(key + ".*").nullValue();
            }
            clearCommand.endObject();
        }
        clearCommand.endObject();
        if (mustClear) {
            adminClient().performRequest("PUT", "/_cluster/settings", emptyMap(), new StringEntity(
                clearCommand.string(), ContentType.APPLICATION_JSON));
        }
    }
    /**
     * Logs a message if there are still running tasks. The reasoning is that any tasks still running are state that is trying to bleed into
     * other tests.