Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-17 02:14:54 +00:00
Due to the recent changes made to convert `repository-hdfs` to test clusters (#41252), the `integTestSecure*` tasks did not depend on `secureHdfsFixture`, so running them would fail because the fixture was not available. This commit adds the fixture as a dependency of those tasks. The `secureHdfsFixture` is an `AntFixture`, which is spawned as a process; internally it waits 30 seconds for its resources to become available. On my local machine it took almost 45 seconds, so I have added the wait time as an input to `AntFixture` (defaulting to 30 seconds) and set it to 60 seconds for the secure HDFS fixture. The integration tests for secure HDFS had been disabled for a long time, so the changes from #42090 to fix those tests are also included in this commit.
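For context, the readiness check described above boils down to polling for a marker file with a bounded wait. Below is a minimal sketch of that pattern in plain Java; it is illustrative only (the real `AntFixture` delegates to Ant's `waitfor` task, and the class and method names here are hypothetical):

import java.nio.file.Files;
import java.nio.file.Path;

// Hypothetical stand-in for what the fixture's wait amounts to: check
// every 500 ms for the readiness (or failure) marker file, giving up
// after a configurable maximum wait.
final class FixtureWait {
    static boolean waitForMarker(Path marker, int maxWaitInSeconds) throws InterruptedException {
        final long deadline = System.nanoTime() + maxWaitInSeconds * 1_000_000_000L;
        while (System.nanoTime() < deadline) {
            if (Files.exists(marker)) {
                return true; // fixture wrote its ports or failure marker
            }
            Thread.sleep(500); // same 500 ms check interval as the Ant waitfor below
        }
        return false; // timed out, as the secure fixture did under the fixed 30 s limit
    }
}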
This commit is contained in:
parent 263f76e5ea
commit 91c342a888
@@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture {
     @Input
     boolean useShell = false
 
+    @Input
+    int maxWaitInSeconds = 30
+
     /**
      * A flag to indicate whether the fixture should be run in the foreground, or spawned.
      * It is protected so subclasses can override (eg RunTask).
@@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture {
 
             String failedProp = "failed${name}"
             // first wait for resources, or the failure marker from the wrapper script
-            ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
+            ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
                 or {
                     resourceexists {
                         file(file: failureMarker.toString())
@@ -91,13 +91,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
   executable = new File(project.runtimeJavaHome, 'bin/java')
   env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+  maxWaitInSeconds 60
   onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled }
   waitCondition = { fixture, ant ->
     // the hdfs.MiniHDFS fixture writes the ports file when
     // it's ready, so we can just wait for the file to exist
     return fixture.portsFile.exists()
   }
-
   final List<String> miniHDFSArgs = []
 
   // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
@@ -130,7 +130,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   }
 }
 
-Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa']
+Set disabledIntegTestTaskNames = []
 
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
@@ -141,10 +141,35 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
       enabled = false;
     }
 
+    if (integTestTaskName.contains("Secure")) {
+      if (integTestTaskName.contains("Ha")) {
+        dependsOn secureHaHdfsFixture
+      } else {
+        dependsOn secureHdfsFixture
+      }
+    }
+
     runner {
+      if (integTestTaskName.contains("Ha")) {
+        if (integTestTaskName.contains("Secure")) {
+          Path path = buildDir.toPath()
+              .resolve("fixtures")
+              .resolve("secureHaHdfsFixture")
+              .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        } else {
+          Path path = buildDir.toPath()
+              .resolve("fixtures")
+              .resolve("haHdfsFixture")
+              .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        }
+      }
+
       if (integTestTaskName.contains("Secure")) {
         if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) {
-          dependsOn secureHdfsFixture
           nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
           nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
           jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
@@ -19,16 +19,6 @@
 
 package org.elasticsearch.repositories.hdfs;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -46,6 +36,16 @@ import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.Assert;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Integration test that runs against an HA-Enabled HDFS instance
  */
@@ -57,13 +57,24 @@ public class HaHdfsFailoverTestSuiteIT extends ESRestTestCase {
         String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
         String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
         String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
+        String ports = System.getProperty("test.hdfs-fixture.ports");
+        String nn1Port = "10001";
+        String nn2Port = "10002";
+        if (ports.length() > 0) {
+            final Path path = PathUtils.get(ports);
+            final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> {
+                return Files.readAllLines(path);
+            });
+            nn1Port = lines.get(0);
+            nn2Port = lines.get(1);
+        }
         boolean securityEnabled = hdfsKerberosPrincipal != null;
 
         Configuration hdfsConfiguration = new Configuration();
         hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
         hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port);
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port);
         hdfsConfiguration.set(
             "dfs.client.failover.proxy.provider.ha-hdfs",
             "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
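The hunk above assumes the fixture's ports file contains one port per line, nn1's first (hence `lines.get(0)` and `lines.get(1)`). A minimal sketch of the writer side of that contract, purely illustrative and not the actual MiniHDFS code:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

// Illustrative writer side of the ports-file contract read above:
// one line per namenode, nn1 first, written only once both are bound.
final class PortsFileSketch {
    static void write(Path portsFile, int nn1Port, int nn2Port) throws IOException {
        Files.write(portsFile,
            Arrays.asList(Integer.toString(nn1Port), Integer.toString(nn2Port)),
            StandardCharsets.UTF_8);
    }
}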
@@ -110,8 +121,8 @@ public class HaHdfsFailoverTestSuiteIT extends ESRestTestCase {
             securityCredentials(securityEnabled, esKerberosPrincipal) +
             "\"conf.dfs.nameservices\": \"ha-hdfs\"," +
             "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:" + nn1Port + "\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:" + nn2Port + "\"," +
             "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " +
                 "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" +
             "}" +
@@ -109,8 +109,8 @@ public class MiniHDFS {
         String haNameService = System.getProperty("ha-nameservice");
         boolean haEnabled = haNameService != null;
         if (haEnabled) {
-            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001);
-            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002);
+            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0);
+            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0);
             MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2);
             MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice);
             builder.nnTopology(namenodeTopology);
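Setting the IPC port to 0 asks the OS for any free port, which avoids collisions between concurrent builds; the actual port is only known after binding, which is why the fixture must publish it via the ports file rather than hardcoding 10001/10002. The same idea in plain Java, as a sketch rather than MiniDFSCluster's internals:

import java.io.IOException;
import java.net.ServerSocket;

// Sketch of ephemeral-port binding: bind to port 0, then ask the socket
// which port the OS actually assigned.
final class EphemeralPortSketch {
    public static void main(String[] args) throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
            int assignedPort = socket.getLocalPort(); // real, collision-free port
            System.out.println("bound to " + assignedPort);
        }
    }
}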