QA: Switch xpack rolling upgrades to three nodes (#31112)

This is much more realistic and can find more issues. This causes the
"mixed cluster" tests to be run twice so I had to fix the tests to work
in that case. In most cases I did as little as possible to get them
working but in a few cases I went a little beyond that to make them
easier for me to debug while getting them to work. My test changes:

1. Remove the "basic indexing" tests and replace them with a copy of the
tests used in the OSS. We have no way of sharing code between these two
projects so for now I copy.
2. Skip a few tests in the "one third" upgraded scenario:
  * creating a scroll to be reused when the cluster is fully upgraded
  * creating some ml data to be used when the cluster is fully upgraded
3. Drop many "assert yellow and that the cluster has two nodes"
assertions. These assertions duplicate those made by the wait condition
and they fail now that we have three nodes.
4. Switch many "assert green and that the cluster has two nodes" to 3
nodes. These assertions are unique from the wait condition and, while
I imagine they aren't required in all cases, now is not the time to
find that out. Thus, I made them work.
5. Rework the index audit trail test so it is more obvious that it is
the same test expecting different numbers based on the shape of the
cluster. The conditions for which number are expected are fairly
complex because the index audit trail is shut down until the template
for it is upgraded and the template is upgraded when a master node is
elected that has the new version of the software.
6. Add some more information to debug the index audit trail test because
it helped me figure out what was going on.

I also dropped the `waitCondition` from the `rolling-upgrade-basic`
tests because it wasn't needed.

Closes #25336
This commit is contained in:
Nik Everett 2018-06-06 11:59:16 -04:00 committed by GitHub
parent 6fd4eb52b8
commit 7c59e7690e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 315 additions and 453 deletions

View File

@ -30,6 +30,10 @@ import java.nio.charset.StandardCharsets;
* Basic test that indexed documents survive the rolling restart. See * Basic test that indexed documents survive the rolling restart. See
* {@link RecoveryIT} for much more in depth testing of the mechanism * {@link RecoveryIT} for much more in depth testing of the mechanism
* by which they survive. * by which they survive.
* <p>
* This test is an almost exact copy of <code>IndexingIT</code> in the
* xpack rolling restart tests. We should work on a way to remove this
* duplication but for now we have no real way to share code.
*/ */
public class IndexingIT extends AbstractRollingTestCase { public class IndexingIT extends AbstractRollingTestCase {
public void testIndexing() throws IOException { public void testIndexing() throws IOException {

View File

@ -8,62 +8,9 @@ apply plugin: 'elasticsearch.standalone-test'
dependencies { dependencies {
testCompile project(path: xpackModule('core'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'runtime')
testCompile project(path: xpackModule('security'), configuration: 'runtime')
testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit
} }
Closure waitWithAuth = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
// wait up to two minutes
final long stopTime = System.currentTimeMillis() + (2 * 60000L);
Exception lastException = null;
int lastResponseCode = 0
while (System.currentTimeMillis() < stopTime) {
lastException = null;
// we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned
HttpURLConnection httpURLConnection = null;
try {
// TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling
httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection();
httpURLConnection.setRequestProperty("Authorization", "Basic " +
Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
httpURLConnection.setRequestMethod("GET");
httpURLConnection.setConnectTimeout(1000);
httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes!
httpURLConnection.connect();
lastResponseCode = httpURLConnection.getResponseCode()
if (lastResponseCode == 200) {
tmpFile.withWriter StandardCharsets.UTF_8.name(), {
it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name()))
}
break;
}
} catch (Exception e) {
logger.debug("failed to call cluster health", e)
lastException = e
} finally {
if (httpURLConnection != null) {
httpURLConnection.disconnect();
}
}
// did not start, so wait a bit before trying again
Thread.sleep(500L);
}
if (tmpFile.exists() == false) {
final String message = "final attempt of calling cluster health failed [lastResponseCode=${lastResponseCode}]"
if (lastException != null) {
logger.error(message, lastException)
} else {
logger.error(message + " [no exception]")
}
}
return tmpFile.exists()
}
// This is a top level task which we will add dependencies to below. // This is a top level task which we will add dependencies to below.
// It is a single task that can be used to backcompat tests against all versions. // It is a single task that can be used to backcompat tests against all versions.
task bwcTest { task bwcTest {
@ -82,14 +29,13 @@ for (Version version : bwcVersions.wireCompatible) {
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
if (version.before('6.3.0')) { if (version.before('6.3.0')) {
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
} }
bwcVersion = version bwcVersion = version
numBwcNodes = 2 numBwcNodes = 3
numNodes = 2 numNodes = 3
minimumMasterNodes = { 2 } minimumMasterNodes = { 3 }
clusterName = 'rolling-upgrade-basic' clusterName = 'rolling-upgrade-basic'
waitCondition = waitWithAuth
setting 'xpack.security.enabled', 'false' setting 'xpack.security.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false' setting 'xpack.monitoring.enabled', 'false'
setting 'xpack.ml.enabled', 'false' setting 'xpack.ml.enabled', 'false'
@ -102,51 +48,62 @@ for (Version version : bwcVersions.wireCompatible) {
systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.rest.suite', 'old_cluster'
} }
Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
configure(extensions.findByName("${baseName}#${name}")) {
configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" clusterName = 'rolling-upgrade-basic'
clusterName = 'rolling-upgrade-basic' unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } minimumMasterNodes = { 3 }
minimumMasterNodes = { 2 } /* Override the data directory so the new node always gets the node we
dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } * just stopped's data directory. */
waitCondition = waitWithAuth dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
setting 'xpack.security.enabled', 'false' setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
setting 'xpack.monitoring.enabled', 'false' setting 'xpack.security.enabled', 'false'
setting 'xpack.ml.enabled', 'false' setting 'xpack.monitoring.enabled', 'false'
setting 'xpack.watcher.enabled', 'false' setting 'xpack.ml.enabled', 'false'
setting 'xpack.license.self_generated.type', 'basic' setting 'xpack.watcher.enabled', 'false'
setting 'node.name', 'mixed-node-0' setting 'xpack.license.self_generated.type', 'basic'
}
} }
Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)
mixedClusterTestRunner.configure {
configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
0, { oldClusterTest.nodes.get(1).transportUri() })
Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
oneThirdUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.rest.suite', 'mixed_cluster'
finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" systemProperty 'tests.first_round', 'true'
finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
}
Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)
configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })
Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
twoThirdsUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.first_round', 'false'
finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
} }
Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)
configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })
clusterName = 'rolling-upgrade-basic'
unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
minimumMasterNodes = { 2 }
dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir }
waitCondition = waitWithAuth
setting 'xpack.security.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false'
setting 'xpack.ml.enabled', 'false'
setting 'xpack.watcher.enabled', 'false'
setting 'xpack.license.self_generated.type', 'basic'
setting 'node.name', 'upgraded-node-0'
}
Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
upgradedClusterTestRunner.configure { upgradedClusterTestRunner.configure {
systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.rest.suite', 'upgraded_cluster'
// only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion /*
finalizedBy "${baseName}#mixedClusterTestCluster#stop" * Force stopping all the upgraded nodes after the test runner
* so they are alive during the test.
*/
finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
} }
Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
@ -170,11 +127,6 @@ task integTest {
} }
check.dependsOn(integTest) check.dependsOn(integTest)
dependencies {
testCompile project(path: xpackModule('core'), configuration: 'runtime')
testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
}
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
// copy x-pack plugin info so it is on the classpath and security manager has the right permissions // copy x-pack plugin info so it is on the classpath and security manager has the right permissions

View File

@ -30,7 +30,7 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant ->
HttpURLConnection httpURLConnection = null; HttpURLConnection httpURLConnection = null;
try { try {
// TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling
httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=3&wait_for_status=yellow").openConnection();
httpURLConnection.setRequestProperty("Authorization", "Basic " + httpURLConnection.setRequestProperty("Authorization", "Basic " +
Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
httpURLConnection.setRequestMethod("GET"); httpURLConnection.setRequestMethod("GET");
@ -128,9 +128,9 @@ subprojects {
String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users'
setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
bwcVersion = version bwcVersion = version
numBwcNodes = 2 numBwcNodes = 3
numNodes = 2 numNodes = 3
minimumMasterNodes = { 2 } minimumMasterNodes = { 3 }
clusterName = 'rolling-upgrade' clusterName = 'rolling-upgrade'
waitCondition = waitWithAuth waitCondition = waitWithAuth
setting 'xpack.monitoring.exporters._http.type', 'http' setting 'xpack.monitoring.exporters._http.type', 'http'
@ -167,78 +167,84 @@ subprojects {
systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.rest.suite', 'old_cluster'
} }
Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
configure(extensions.findByName("${baseName}#${name}")) {
configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' clusterName = 'rolling-upgrade'
clusterName = 'rolling-upgrade' unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } minimumMasterNodes = { 3 }
minimumMasterNodes = { 2 } /* Override the data directory so the new node always gets the node we
dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } * just stopped's data directory. */
waitCondition = waitWithAuth dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
setting 'xpack.monitoring.exporters._http.type', 'http' waitCondition = waitWithAuth
setting 'xpack.monitoring.exporters._http.enabled', 'false' setting 'xpack.monitoring.exporters._http.type', 'http'
setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' setting 'xpack.monitoring.exporters._http.enabled', 'false'
setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' setting 'xpack.monitoring.exporters._http.auth.username', 'test_user'
setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password'
setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial'
setting 'xpack.security.transport.ssl.enabled', 'true' setting 'xpack.security.enabled', 'true'
setting 'xpack.ssl.keystore.path', 'testnode.jks' setting 'xpack.security.transport.ssl.enabled', 'true'
keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' setting 'xpack.ssl.keystore.path', 'testnode.jks'
setting 'node.attr.upgraded', 'first' keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode'
setting 'xpack.security.authc.token.enabled', 'true' setting 'node.attr.upgraded', 'true'
setting 'xpack.security.audit.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true'
setting 'xpack.security.audit.outputs', 'index' setting 'xpack.security.audit.enabled', 'true'
setting 'node.name', 'mixed-node-0' setting 'xpack.security.audit.outputs', 'index'
dependsOn copyTestNodeKeystore setting 'node.name', "upgraded-node-${stopNode}"
extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') dependsOn copyTestNodeKeystore
if (withSystemKey) { extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks')
setting 'xpack.watcher.encrypt_sensitive_data', 'true' if (withSystemKey) {
keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" setting 'xpack.watcher.encrypt_sensitive_data', 'true'
keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key"
}
} }
} }
Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)
mixedClusterTestRunner.configure {
configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
0, { oldClusterTest.nodes.get(1).transportUri() })
Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
oneThirdUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.rest.suite', 'mixed_cluster'
finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" systemProperty 'tests.first_round', 'true'
// We only need to run these tests once so we may as well do it when we're two thirds upgraded
systemProperty 'tests.rest.blacklist', [
'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade',
'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data',
'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster',
].join(',')
finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
}
Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)
configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })
Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
twoThirdsUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.first_round', 'false'
finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
} }
Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)
configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })
setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
minimumMasterNodes = { 2 }
dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir }
waitCondition = waitWithAuth
setting 'xpack.monitoring.exporters._http.type', 'http'
setting 'xpack.monitoring.exporters._http.enabled', 'false'
setting 'xpack.monitoring.exporters._http.auth.username', 'test_user'
setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password'
setting 'xpack.license.self_generated.type', 'trial'
setting 'xpack.security.enabled', 'true'
setting 'xpack.security.transport.ssl.enabled', 'true'
setting 'xpack.ssl.keystore.path', 'testnode.jks'
keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode'
setting 'xpack.security.authc.token.enabled', 'true'
setting 'xpack.security.audit.enabled', 'true'
setting 'xpack.security.audit.outputs', 'index'
setting 'node.name', 'upgraded-node-0'
dependsOn copyTestNodeKeystore
extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks')
if (withSystemKey) {
setting 'xpack.watcher.encrypt_sensitive_data', 'true'
keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key"
}
}
Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
upgradedClusterTestRunner.configure { upgradedClusterTestRunner.configure {
systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.rest.suite', 'upgraded_cluster'
/*
* Force stopping all the upgraded nodes after the test runner
* so they are alive during the test.
*/
finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
// migration tests should only run when the original/old cluster nodes where versions < 5.2.0. // migration tests should only run when the original/old cluster nodes where versions < 5.2.0.
// this stinks but we do the check here since our rest tests do not support conditionals // this stinks but we do the check here since our rest tests do not support conditionals
@ -251,8 +257,6 @@ subprojects {
systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster'
} }
} }
// only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
finalizedBy "${baseName}#mixedClusterTestCluster#stop"
} }
Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {

View File

@ -37,12 +37,12 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase {
return true; return true;
} }
enum CLUSTER_TYPE { enum ClusterType {
OLD, OLD,
MIXED, MIXED,
UPGRADED; UPGRADED;
public static CLUSTER_TYPE parse(String value) { public static ClusterType parse(String value) {
switch (value) { switch (value) {
case "old_cluster": case "old_cluster":
return OLD; return OLD;
@ -56,7 +56,7 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase {
} }
} }
protected final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite"));
@Override @Override
protected Settings restClientSettings() { protected Settings restClientSettings() {

View File

@ -8,37 +8,48 @@ package org.elasticsearch.upgrades;
import org.apache.http.HttpEntity; import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType; import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity; import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.common.Booleans;
import org.hamcrest.Matchers; import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.hasSize;
public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase {
public void testDocsAuditedInOldCluster() throws Exception { public void testAuditLogs() throws Exception {
assumeTrue("only runs against old cluster", clusterType == CLUSTER_TYPE.OLD);
assertBusy(() -> { assertBusy(() -> {
assertAuditDocsExist(); assertAuditDocsExist();
assertNumUniqueNodeNameBuckets(2); assertNumUniqueNodeNameBuckets(expectedNumUniqueNodeNameBuckets());
}); }, 1, TimeUnit.HOURS);
} }
public void testDocsAuditedInMixedCluster() throws Exception { private int expectedNumUniqueNodeNameBuckets() throws IOException {
assumeTrue("only runs against mixed cluster", clusterType == CLUSTER_TYPE.MIXED); switch (CLUSTER_TYPE) {
assertBusy(() -> { case OLD:
assertAuditDocsExist(); // There are three nodes in the initial test cluster
assertNumUniqueNodeNameBuckets(2); return 3;
}); case MIXED:
} if (false == masterIsNewVersion()) {
return 3;
public void testDocsAuditedInUpgradedCluster() throws Exception { }
assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED); if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) {
assertBusy(() -> { // One of the old nodes has been removed and we've added a new node
assertAuditDocsExist(); return 4;
assertNumUniqueNodeNameBuckets(4); }
}); // Two of the old nodes have been removed and we've added two new nodes
return 5;
case UPGRADED:
return 6;
default:
throw new IllegalArgumentException("Unsupported cluster type [" + CLUSTER_TYPE + "]");
}
} }
private void assertAuditDocsExist() throws Exception { private void assertAuditDocsExist() throws Exception {
@ -51,26 +62,40 @@ public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase {
private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception {
// call API that will hit all nodes // call API that will hit all nodes
assertEquals(200, client().performRequest("GET", "/_nodes").getStatusLine().getStatusCode()); Map<?, ?> nodesResponse = entityAsMap(client().performRequest("GET", "/_nodes/_all/info/version"));
logger.info("all nodes {}", nodesResponse);
HttpEntity httpEntity = new StringEntity( HttpEntity httpEntity = new StringEntity(
"{\n" + "{\n" +
" \"aggs\" : {\n" + " \"aggs\" : {\n" +
" \"nodes\" : {\n" + " \"nodes\" : {\n" +
" \"terms\" : { \"field\" : \"node_name\" }\n" + " \"terms\" : { \"field\" : \"node_name\" }\n" +
" }\n" + " }\n" +
" }\n" + " }\n" +
"}", ContentType.APPLICATION_JSON); "}", ContentType.APPLICATION_JSON);
Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search", Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search",
Collections.singletonMap("pretty", "true"), httpEntity); Collections.singletonMap("pretty", "true"), httpEntity);
Map<String, Object> aggResponseMap = entityAsMap(aggResponse); Map<String, Object> aggResponseMap = entityAsMap(aggResponse);
logger.debug("aggResponse {}", aggResponseMap); logger.debug("aggResponse {}", aggResponseMap);
Map<String, Object> aggregations = (Map<String, Object>) aggResponseMap.get("aggregations"); Map<?, ?> aggregations = (Map<?, ?>) aggResponseMap.get("aggregations");
assertNotNull(aggregations); assertNotNull(aggregations);
Map<String, Object> nodesAgg = (Map<String, Object>) aggregations.get("nodes"); Map<?, ?> nodesAgg = (Map<?, ?>) aggregations.get("nodes");
assertNotNull(nodesAgg); assertNotNull(nodesAgg);
List<Map<String, Object>> buckets = (List<Map<String, Object>>) nodesAgg.get("buckets"); List<?> buckets = (List<?>) nodesAgg.get("buckets");
assertNotNull(buckets); assertNotNull(buckets);
assertEquals("Found node buckets " + buckets, numBuckets, buckets.size()); assertThat("Found node buckets " + buckets, buckets, hasSize(numBuckets));
}
/**
* Has the master been upgraded to the new version?
* @throws IOException
*/
private boolean masterIsNewVersion() throws IOException {
Map<?, ?> map = entityAsMap(client().performRequest("GET", "/_nodes/_master"));
map = (Map<?, ?>) map.get("nodes");
assertThat(map.values(), hasSize(1));
map = (Map<?, ?>) map.values().iterator().next();
Version masterVersion = Version.fromString(map.get("version").toString());
return Version.CURRENT.equals(masterVersion);
} }
} }

View File

@ -0,0 +1,124 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.upgrades;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
/**
* Basic test that indexed documents survive the rolling restart.
* <p>
* This test is an almost exact copy of <code>IndexingIT</code> in the
* oss rolling restart tests. We should work on a way to remove this
* duplication but for now we have no real way to share code.
*/
public class IndexingIT extends AbstractUpgradeTestCase {
    public void testIndexing() throws IOException {
        // First make sure the cluster shape matches the phase we are in so
        // the document-count assertions below are deterministic.
        if (CLUSTER_TYPE == ClusterType.MIXED) {
            waitForYellowWithAllNodes();
        } else if (CLUSTER_TYPE == ClusterType.UPGRADED) {
            waitForGreenTestIndices();
        } else if (CLUSTER_TYPE != ClusterType.OLD) {
            throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }

        if (CLUSTER_TYPE == ClusterType.OLD) {
            // Only the very first round creates the indices and seeds them.
            createTestIndices();
            bulk("test_index", "_OLD", 5);
            bulk("index_with_replicas", "_OLD", 5);
        }

        int expectedCount = expectedTestIndexCount();
        assertCount("test_index", expectedCount);
        assertCount("index_with_replicas", 5);
        assertCount("empty_index", 0);

        if (CLUSTER_TYPE != ClusterType.OLD) {
            // Exercise indexing and deleting against the (partially) upgraded cluster.
            bulk("test_index", "_" + CLUSTER_TYPE, 5);
            Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted");
            toBeDeleted.addParameter("refresh", "true");
            toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}");
            client().performRequest(toBeDeleted);
            assertCount("test_index", expectedCount + 6);

            Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted");
            delete.addParameter("refresh", "true");
            client().performRequest(delete);
            assertCount("test_index", expectedCount + 5);
        }
    }

    /** Wait until all three nodes have joined and the cluster is at least yellow. */
    private void waitForYellowWithAllNodes() throws IOException {
        Request waitForYellow = new Request("GET", "/_cluster/health");
        waitForYellow.addParameter("wait_for_nodes", "3");
        waitForYellow.addParameter("wait_for_status", "yellow");
        client().performRequest(waitForYellow);
    }

    /** Wait until the test indices are fully allocated on the upgraded cluster. */
    private void waitForGreenTestIndices() throws IOException {
        Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index");
        waitForGreen.addParameter("wait_for_nodes", "3");
        waitForGreen.addParameter("wait_for_status", "green");
        // wait for long enough that we give delayed unassigned shards to stop being delayed
        waitForGreen.addParameter("timeout", "70s");
        waitForGreen.addParameter("level", "shards");
        client().performRequest(waitForGreen);
    }

    /** Create the three indices the test asserts against, configured to recover quickly. */
    private void createTestIndices() throws IOException {
        Request createTestIndex = new Request("PUT", "/test_index");
        createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}");
        client().performRequest(createTestIndex);

        String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}";
        Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas");
        createIndexWithReplicas.setJsonEntity(recoverQuickly);
        client().performRequest(createIndexWithReplicas);

        Request createEmptyIndex = new Request("PUT", "/empty_index");
        // Ask for recovery to be quick
        createEmptyIndex.setJsonEntity(recoverQuickly);
        client().performRequest(createEmptyIndex);
    }

    /**
     * Number of documents expected in {@code test_index} at the start of the
     * current phase: each completed phase before this one bulk-indexed 5 docs.
     */
    private int expectedTestIndexCount() {
        switch (CLUSTER_TYPE) {
        case OLD:
            return 5;
        case MIXED:
            return Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 5 : 10;
        case UPGRADED:
            return 15;
        default:
            throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]");
        }
    }

    /**
     * Bulk-index {@code count} documents into {@code index}, refreshing so
     * they are immediately visible to the count assertions.
     */
    private void bulk(String index, String valueSuffix, int count) throws IOException {
        StringBuilder body = new StringBuilder();
        for (int docId = 0; docId < count; docId++) {
            body.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n");
            body.append("{\"f1\": \"v").append(docId).append(valueSuffix).append("\", \"f2\": ").append(docId).append("}\n");
        }
        Request bulkRequest = new Request("POST", "/_bulk");
        bulkRequest.addParameter("refresh", "true");
        bulkRequest.setJsonEntity(body.toString());
        client().performRequest(bulkRequest);
    }

    /** Assert that {@code index} contains exactly {@code count} documents. */
    private void assertCount(String index, int count) throws IOException {
        Request searchRequest = new Request("POST", "/" + index + "/_search");
        searchRequest.addParameter("filter_path", "hits.total");
        Response searchResponse = client().performRequest(searchRequest);
        assertEquals("{\"hits\":{\"total\":" + count + "}}",
                EntityUtils.toString(searchResponse.getEntity(), StandardCharsets.UTF_8));
    }
}

View File

@ -25,7 +25,7 @@ import java.util.Map;
public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
public void testGeneratingTokenInOldCluster() throws Exception { public void testGeneratingTokenInOldCluster() throws Exception {
assumeTrue("this test should only run against the old cluster", clusterType == CLUSTER_TYPE.OLD); assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD);
final StringEntity tokenPostBody = new StringEntity("{\n" + final StringEntity tokenPostBody = new StringEntity("{\n" +
" \"username\": \"test_user\",\n" + " \"username\": \"test_user\",\n" +
" \"password\": \"x-pack-test-password\",\n" + " \"password\": \"x-pack-test-password\",\n" +
@ -61,7 +61,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception {
assumeTrue("this test should only run against the mixed or upgraded cluster", assumeTrue("this test should only run against the mixed or upgraded cluster",
clusterType == CLUSTER_TYPE.MIXED || clusterType == CLUSTER_TYPE.UPGRADED); CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED);
Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1");
assertOK(getResponse); assertOK(getResponse);
Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source"); Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source");
@ -69,7 +69,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
} }
public void testMixedCluster() throws Exception { public void testMixedCluster() throws Exception {
assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED); assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED);
assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion());
Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2");
assertOK(getResponse); assertOK(getResponse);
@ -117,7 +117,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
} }
public void testUpgradedCluster() throws Exception { public void testUpgradedCluster() throws Exception {
assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED); assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED);
Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2");
assertOK(getResponse); assertOK(getResponse);
Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source"); Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source");

View File

@ -1,166 +1,13 @@
--- ---
setup: "Start scroll in mixed cluster on upgraded node that we will continue after upgrade":
- do:
cluster.health:
# if the primary shard of an index with (number_of_replicas > 0) ends up on the new node, the replica cannot be
# allocated to the old node (see NodeVersionAllocationDecider). x-pack automatically creates indices with
# replicas, for example monitoring-data-*.
wait_for_status: yellow
wait_for_nodes: 2
---
"Index data and search on the mixed cluster":
- do:
search:
index: test_index
- match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v1_mixed", "f2": 5}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v2_mixed", "f2": 6}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v3_mixed", "f2": 7}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v4_mixed", "f2": 8}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v5_mixed", "f2": 9}'
- do:
index:
index: test_index
type: test_type
id: d10
body: {"f1": "v6_mixed", "f2": 10}
- do:
index:
index: test_index
type: test_type
id: d11
body: {"f1": "v7_mixed", "f2": 11}
- do:
index:
index: test_index
type: test_type
id: d12
body: {"f1": "v8_mixed", "f2": 12}
- do:
indices.refresh:
index: test_index
- do:
search:
index: test_index
- match: { hits.total: 13 } # 5 docs from old cluster, 8 docs from mixed cluster
- do:
delete:
index: test_index
type: test_type
id: d10
- do:
delete:
index: test_index
type: test_type
id: d11
- do:
delete:
index: test_index
type: test_type
id: d12
- do:
indices.refresh:
index: test_index
---
"Basic scroll mixed":
- do:
indices.create:
index: test_scroll
- do:
index:
index: test_scroll
type: test
id: 42
body: { foo: 1 }
- do:
index:
index: test_scroll
type: test
id: 43
body: { foo: 2 }
- do:
indices.refresh: {}
- do:
search:
index: test_scroll
size: 1
scroll: 1m
sort: foo
body:
query:
match_all: {}
- set: {_scroll_id: scroll_id}
- match: {hits.total: 2 }
- length: {hits.hits: 1 }
- match: {hits.hits.0._id: "42" }
- do:
index:
index: test_scroll
type: test
id: 44
body: { foo: 3 }
- do:
indices.refresh: {}
- do:
scroll:
body: { "scroll_id": "$scroll_id", "scroll": "1m"}
- match: {hits.total: 2 }
- length: {hits.hits: 1 }
- match: {hits.hits.0._id: "43" }
- do:
scroll:
scroll_id: $scroll_id
scroll: 1m
- match: {hits.total: 2 }
- length: {hits.hits: 0 }
- do:
clear_scroll:
scroll_id: $scroll_id
---
"Start scroll in mixed cluster for upgraded":
- do: - do:
indices.create: indices.create:
index: upgraded_scroll index: upgraded_scroll
wait_for_active_shards: all wait_for_active_shards: all
body: body:
settings: settings:
number_of_replicas: "0" number_of_replicas: 0
index.routing.allocation.include.upgraded: "first" index.routing.allocation.include.upgraded: true
- do: - do:
index: index:

View File

@ -1,13 +1,5 @@
--- ---
"Verify user and role in mixed cluster": "Verify user and role in mixed cluster":
- do:
headers:
Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ="
cluster.health:
wait_for_status: yellow
wait_for_nodes: 2
- match: { timed_out: false }
- do: - do:
xpack.security.get_user: xpack.security.get_user:
username: "native_user" username: "native_user"
@ -36,6 +28,3 @@
username: "kibana,logstash_system" username: "kibana,logstash_system"
- match: { kibana.enabled: false } - match: { kibana.enabled: false }
- match: { logstash_system.enabled: true } - match: { logstash_system.enabled: true }

View File

@ -1,10 +1,3 @@
---
setup:
- do:
cluster.health:
wait_for_status: yellow
wait_for_nodes: 2
--- ---
"Test get old cluster job": "Test get old cluster job":
- skip: - skip:

View File

@ -1,9 +1,3 @@
setup:
- do:
cluster.health:
wait_for_status: yellow
wait_for_nodes: 2
--- ---
"Test old cluster datafeed": "Test old cluster datafeed":
- do: - do:

View File

@ -1,31 +0,0 @@
---
"Index data and search on the old cluster":
- do:
indices.create:
index: test_index
wait_for_active_shards : all
body:
settings:
index:
number_of_replicas: 1
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v1_old", "f2": 0}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v2_old", "f2": 1}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v3_old", "f2": 2}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v4_old", "f2": 3}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v5_old", "f2": 4}'
- do:
search:
index: test_index
- match: { hits.total: 5 }

View File

@ -1,42 +1,5 @@
--- ---
"Index data and search on the upgraded cluster": "Continue scroll after upgrade":
- do:
cluster.health:
wait_for_status: green
wait_for_nodes: 2
# wait for long enough that we give delayed unassigned shards to stop being delayed
timeout: 70s
level: shards
- do:
search:
index: test_index
- match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v1_upgraded", "f2": 10}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v2_upgraded", "f2": 11}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v3_upgraded", "f2": 12}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v4_upgraded", "f2": 13}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v5_upgraded", "f2": 14}'
- do:
search:
index: test_index
- match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs
---
"Get indexed scroll and execute scroll":
- do: - do:
get: get:
index: scroll_index index: scroll_index

View File

@ -5,7 +5,7 @@
Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ="
cluster.health: cluster.health:
wait_for_status: green wait_for_status: green
wait_for_nodes: 2 wait_for_nodes: 3
# wait for long enough that we give delayed unassigned shards to stop being delayed # wait for long enough that we give delayed unassigned shards to stop being delayed
timeout: 70s timeout: 70s
- match: { timed_out: false } - match: { timed_out: false }
@ -22,4 +22,3 @@
- match: { native_role.cluster.0: "all" } - match: { native_role.cluster.0: "all" }
- match: { native_role.indices.0.names.0: "test_index" } - match: { native_role.indices.0.names.0: "test_index" }
- match: { native_role.indices.0.privileges.0: "all" } - match: { native_role.indices.0.privileges.0: "all" }

View File

@ -2,7 +2,7 @@ setup:
- do: - do:
cluster.health: cluster.health:
wait_for_status: green wait_for_status: green
wait_for_nodes: 2 wait_for_nodes: 3
# wait for long enough that we give delayed unassigned shards to stop being delayed # wait for long enough that we give delayed unassigned shards to stop being delayed
timeout: 70s timeout: 70s

View File

@ -2,7 +2,7 @@ setup:
- do: - do:
cluster.health: cluster.health:
wait_for_status: green wait_for_status: green
wait_for_nodes: 2 wait_for_nodes: 3
# wait for long enough that we give delayed unassigned shards to stop being delayed # wait for long enough that we give delayed unassigned shards to stop being delayed
timeout: 70s timeout: 70s
@ -97,4 +97,3 @@ setup:
xpack.ml.delete_job: xpack.ml.delete_job:
job_id: mixed-cluster-datafeed-job job_id: mixed-cluster-datafeed-job
- match: { acknowledged: true } - match: { acknowledged: true }