[Monitoring] Use Low Level REST Client for HTTP Exporter

This rewrites the HTTP Exporter to use the REST client underneath. Functionality is improved around resource blocking (waiting for templates and pipelines to exist), and the majority of the code is fundamentally simplified by removing direct HTTP calls.

This is blocked by the SSLService pull request. After that is merged, I will update this PR to reflect those changes, which may also allow us to remove the security privileges currently required for monitoring.
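As a rough sketch of the change (the host, parameters, and bulk body below are illustrative placeholders, not the exporter's exact factory code), the exporter now builds and reuses a low level REST client instead of opening raw HTTP connections itself:

// illustrative only: the HTTP exporter now delegates transport to the low level REST client
RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
        .setFailureListener(new NodeFailureListener())
        .build();
client.performRequestAsync("POST", "/_bulk", parameters, bulkBody, HttpExportBulkResponseListener.INSTANCE);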

Original commit: elastic/x-pack-elasticsearch@1ad25f17f8
Chris Earle 2016-08-22 20:29:31 -04:00
parent 41334abda0
commit 5d5a9afb57
52 changed files with 5364 additions and 1952 deletions

View File

@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -22,7 +23,6 @@ import org.junit.Before;
import org.junit.BeforeClass;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
@ -83,12 +83,9 @@ public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase {
@Before
public void enableExporter() throws Exception {
InetSocketAddress httpAddress = randomFrom(httpAddresses());
URI uri = new URI("https", null, httpAddress.getHostString(), httpAddress.getPort(), "/", null, null);
Settings exporterSettings = Settings.builder()
.put("xpack.monitoring.exporters._http.enabled", true)
.put("xpack.monitoring.exporters._http.host", uri.toString())
.put("xpack.monitoring.exporters._http.host", "https://" + NetworkAddress.format(randomFrom(httpAddresses())))
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
}

View File

@ -51,6 +51,10 @@ dependencies {
// needed for subethasmtp, has @GuardedBy annotation
testCompile 'com.google.code.findbugs:jsr305:3.0.1'
// monitoring deps
compile "org.elasticsearch.client:rest:${version}"
compile "org.elasticsearch.client:sniffer:${version}"
// common test deps
testCompile 'org.elasticsearch:securemock:1.2'
testCompile 'org.slf4j:slf4j-log4j12:1.6.2'

View File

@ -12,7 +12,6 @@ import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
@ -63,15 +62,13 @@ public class Monitoring implements ActionPlugin {
public static final String NAME = "monitoring";
private final Settings settings;
private final Environment env;
private final XPackLicenseState licenseState;
private final boolean enabled;
private final boolean transportClientMode;
private final boolean tribeNode;
public Monitoring(Settings settings, Environment env, XPackLicenseState licenseState) {
public Monitoring(Settings settings, XPackLicenseState licenseState) {
this.settings = settings;
this.env = env;
this.licenseState = licenseState;
this.enabled = XPackSettings.MONITORING_ENABLED.get(settings);
this.transportClientMode = XPackPlugin.transportClientMode(settings);
@ -107,10 +104,10 @@ public class Monitoring implements ActionPlugin {
final MonitoringSettings monitoringSettings = new MonitoringSettings(settings, clusterSettings);
final CleanerService cleanerService = new CleanerService(settings, clusterSettings, threadPool, licenseState);
// TODO do exporters and their ssl config really need to be dynamic? https://github.com/elastic/x-plugins/issues/3117
// TODO: https://github.com/elastic/x-plugins/issues/3117 (remove dynamic need with static exporters)
final SSLService dynamicSSLService = sslService.createDynamicSSLService();
Map<String, Exporter.Factory> exporterFactories = new HashMap<>();
exporterFactories.put(HttpExporter.TYPE, config -> new HttpExporter(config, env, dynamicSSLService));
exporterFactories.put(HttpExporter.TYPE, config -> new HttpExporter(config, dynamicSSLService));
exporterFactories.put(LocalExporter.TYPE, config -> new LocalExporter(config, client, clusterService, cleanerService));
final Exporters exporters = new Exporters(settings, exporterFactories, clusterService);

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.monitoring.exporter;
import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
/**
@ -18,11 +19,15 @@ public abstract class ExportBulk {
private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZING);
public ExportBulk(String name) {
this.name = name;
this.name = Objects.requireNonNull(name);
}
@Override
public String toString() {
/**
* Get the name used for any logging messages.
*
* @return Never {@code null}.
*/
public String getName() {
return name;
}

View File

@ -5,11 +5,7 @@
*/
package org.elasticsearch.xpack.monitoring.exporter;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
@ -25,7 +21,6 @@ public abstract class Exporter implements AutoCloseable {
public static final String EXPORT_PIPELINE_NAME = "xpack_monitoring_" + MonitoringTemplateUtils.TEMPLATE_VERSION;
public static final String INDEX_NAME_TIME_FORMAT_SETTING = "index.name.time_format";
public static final String BULK_TIMEOUT_SETTING = "bulk.timeout";
/**
* Every {@code Exporter} adds the ingest pipeline to bulk requests, but they should, at the exporter level, allow that to be disabled.
* <p>
@ -34,16 +29,11 @@ public abstract class Exporter implements AutoCloseable {
public static final String USE_INGEST_PIPELINE_SETTING = "use_ingest";
protected final Config config;
protected final Logger logger;
@Nullable protected final TimeValue bulkTimeout;
private AtomicBoolean closed = new AtomicBoolean(false);
public Exporter(Config config) {
this.config = config;
this.logger = config.logger(getClass());
this.bulkTimeout = config.settings().getAsTime(BULK_TIMEOUT_SETTING, null);
}
public String name() {
@ -82,7 +72,11 @@ public abstract class Exporter implements AutoCloseable {
protected abstract void doClose();
protected String settingFQN(String setting) {
protected static String settingFQN(final Config config) {
return MonitoringSettings.EXPORTERS_SETTINGS.getKey() + config.name;
}
protected static String settingFQN(final Config config, final String setting) {
return MonitoringSettings.EXPORTERS_SETTINGS.getKey() + config.name + "." + setting;
}
@ -119,13 +113,11 @@ public abstract class Exporter implements AutoCloseable {
private final String name;
private final String type;
private final boolean enabled;
private final Settings globalSettings;
private final Settings settings;
public Config(String name, String type, Settings globalSettings, Settings settings) {
public Config(String name, String type, Settings settings) {
this.name = name;
this.type = type;
this.globalSettings = globalSettings;
this.settings = settings;
this.enabled = settings.getAsBoolean("enabled", true);
}
@ -146,9 +138,6 @@ public abstract class Exporter implements AutoCloseable {
return settings;
}
public Logger logger(Class clazz) {
return Loggers.getLogger(clazz, globalSettings, name);
}
}
/** A factory for constructing {@link Exporter} instances.*/
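For illustration, assuming an exporter named "_http" and that MonitoringSettings.EXPORTERS_SETTINGS resolves to the "xpack.monitoring.exporters." prefix, the two settingFQN overloads above produce fully-qualified keys like:

// illustrative only; assumes config.name is "_http"
settingFQN(config);          // "xpack.monitoring.exporters._http"
settingFQN(config, "host");  // "xpack.monitoring.exporters._http.host"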

View File

@ -13,7 +13,6 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.node.Node;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.exporter.local.LocalExporter;
@ -117,11 +116,6 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
}
Map<String, Exporter> initExporters(Settings settings) {
Settings globalSettings = Settings.builder()
.put(settings)
.put(Node.NODE_NAME_SETTING.getKey(), nodeName())
.build();
Set<String> singletons = new HashSet<>();
Map<String, Exporter> exporters = new HashMap<>();
boolean hasDisabled = false;
@ -135,7 +129,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
if (factory == null) {
throw new SettingsException("unknown exporter type [" + type + "] set for exporter [" + name + "]");
}
Exporter.Config config = new Exporter.Config(name, type, globalSettings, exporterSettings);
Exporter.Config config = new Exporter.Config(name, type, exporterSettings);
if (!config.enabled()) {
hasDisabled = true;
if (logger.isDebugEnabled()) {
@ -162,8 +156,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
// fallback on the default
//
if (exporters.isEmpty() && !hasDisabled) {
Exporter.Config config = new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE,
globalSettings, Settings.EMPTY);
Exporter.Config config = new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE, Settings.EMPTY);
exporters.put(config.name(), factories.get(LocalExporter.TYPE).create(config));
}

View File

@ -0,0 +1,156 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.exporter.ExportBulk;
import org.elasticsearch.xpack.monitoring.exporter.ExportException;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/**
* {@code HttpExportBulk} uses the {@link RestClient} to perform a bulk operation against the remote cluster.
*/
class HttpExportBulk extends ExportBulk {
private static final Logger logger = Loggers.getLogger(HttpExportBulk.class);
/**
* The {@link RestClient} managed by the {@link HttpExporter}.
*/
private final RestClient client;
/**
* The querystring parameters to pass along with every bulk request.
*/
private final Map<String, String> params;
/**
* Resolvers are used to render monitoring documents into JSON.
*/
private final ResolversRegistry registry;
/**
* The bytes payload that represents the bulk body is created via {@link #doAdd(Collection)}.
*/
private byte[] payload = null;
public HttpExportBulk(final String name, final RestClient client, final Map<String, String> parameters,
final ResolversRegistry registry) {
super(name);
this.client = client;
this.params = parameters;
this.registry = registry;
}
@Override
public void doAdd(Collection<MonitoringDoc> docs) throws ExportException {
try {
if (docs != null && docs.isEmpty() == false) {
try (final BytesStreamOutput payload = new BytesStreamOutput()) {
for (MonitoringDoc monitoringDoc : docs) {
// any failure caused by an individual doc will be written as an empty byte[], thus not impacting the rest
payload.write(toBulkBytes(monitoringDoc));
}
// store the payload until we flush
this.payload = BytesReference.toBytes(payload.bytes());
}
}
} catch (Exception e) {
throw new ExportException("failed to add documents to export bulk [{}]", e, name);
}
}
@Override
public void doFlush() throws ExportException {
if (payload == null) {
throw new ExportException("unable to send documents because none were loaded for export bulk [{}]", name);
} else if (payload.length != 0) {
final HttpEntity body = new ByteArrayEntity(payload, ContentType.APPLICATION_JSON);
client.performRequestAsync("POST", "/_bulk", params, body, HttpExportBulkResponseListener.INSTANCE);
// free the memory
payload = null;
}
}
@Override
protected void doClose() {
// nothing serious to do at this stage
assert payload == null;
}
private byte[] toBulkBytes(final MonitoringDoc doc) throws IOException {
final XContentType xContentType = XContentType.JSON;
final XContent xContent = xContentType.xContent();
try (final BytesStreamOutput out = new BytesStreamOutput()) {
MonitoringIndexNameResolver<MonitoringDoc> resolver = registry.getResolver(doc);
if (resolver != null) {
String index = resolver.index(doc);
String type = resolver.type(doc);
String id = resolver.id(doc);
try (XContentBuilder builder = new XContentBuilder(xContent, out)) {
// Builds the bulk action metadata line
builder.startObject();
builder.startObject("index");
builder.field("_index", index);
builder.field("_type", type);
if (id != null) {
builder.field("_id", id);
}
builder.endObject();
builder.endObject();
}
// Adds action metadata line bulk separator
out.write(xContent.streamSeparator());
// Render the monitoring document
BytesRef bytesRef = resolver.source(doc, xContentType).toBytesRef();
out.write(bytesRef.bytes, bytesRef.offset, bytesRef.length);
// Adds final bulk separator
out.write(xContent.streamSeparator());
logger.trace("added index request [index={}, type={}, id={}]", index, type, id);
} else {
logger.error("no resolver found for monitoring document [class={}, id={}, version={}]",
doc.getClass().getName(), doc.getMonitoringId(), doc.getMonitoringVersion());
}
return BytesReference.toBytes(out.bytes());
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to render document [{}], skipping it [{}]", doc, name), e);
return BytesRef.EMPTY_BYTES;
}
}
}
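For reference, the payload assembled by doAdd and sent by doFlush is ordinary bulk NDJSON: each monitoring document becomes an action-metadata line followed by its rendered source (the index, type, and id values below are illustrative):

{ "index" : { "_index" : ".monitoring-es-2-2016.08.22", "_type" : "node_stats", "_id" : "AVa0qFvs" } }
{ "cluster_uuid" : "...", "timestamp" : "...", "node_stats" : { ... } }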

View File

@ -0,0 +1,122 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Objects;
/**
* {@code HttpExportBulkResponseListener} logs issues based on the response, but otherwise does nothing else.
*/
class HttpExportBulkResponseListener implements ResponseListener {
private static final Logger logger = Loggers.getLogger(HttpExportBulkResponseListener.class);
/**
* Singleton instance.
*/
public static final HttpExportBulkResponseListener INSTANCE = new HttpExportBulkResponseListener(XContentType.JSON.xContent());
/**
* The response content type.
*/
private final XContent xContent;
/**
* Create a new {@link HttpExportBulkResponseListener}.
*
* @param xContent The {@code XContent} to use for parsing the response.
*/
HttpExportBulkResponseListener(final XContent xContent) {
this.xContent = Objects.requireNonNull(xContent);
}
/**
* Success is relative with bulk responses because unless it's rejected outright, it returns with a 200.
* <p>
* Individual documents can fail, and since we control how they are built, any individual failure is unexpected and worth logging.
*/
@Override
public void onSuccess(final Response response) {
try (final XContentParser parser = xContent.createParser(response.getEntity().getContent())) {
// avoid parsing the entire payload if we don't need to
XContentParser.Token token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("errors".equals(currentFieldName)) {
// no errors? then we can stop looking
if (parser.booleanValue() == false) {
return;
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
// note: this assumes that "items" is the only array portion of the response (currently true)
parseErrors(parser);
return;
}
}
}
} catch (IOException | RuntimeException e) {
onError("unexpected exception while verifying bulk response", e);
}
}
/**
* Logs every <code>error</code> field's value until it hits the end of an array.
*
* @param parser The bulk response parser
* @throws IOException if any parsing error occurs
*/
private void parseErrors(final XContentParser parser) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("error".equals(currentFieldName)) {
onItemError(parser.text());
}
}
}
}
/**
* Log obvious failures.
* <p>
* In the future, we should queue replayable failures.
*/
@Override
public void onFailure(final Exception exception) {
// queueable exceptions:
// - RestStatus.TOO_MANY_REQUESTS.getStatus()
// - possibly other, non-ResponseExceptions
onError("bulk request failed unexpectedly", exception);
}
void onError(final String msg, final Throwable cause) {
logger.warn(msg, cause);
}
void onItemError(final String text) {
logger.warn("unexpected error while indexing monitoring document: [{}]", text);
}
}
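For context, the kind of partial bulk response this listener walks looks roughly like the following (item details are illustrative). With "errors" set to true, parseErrors logs each item's "error" value via onItemError:

{
  "took" : 5,
  "errors" : true,
  "items" : [
    { "index" : { "status" : 201 } },
    { "index" : { "status" : 400, "error" : "MapperParsingException[failed to parse]" } }
  ]
}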

View File

@ -1,38 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
public class HttpExporterUtils {
public static URL parseHostWithPath(String host, String path) throws URISyntaxException, MalformedURLException {
if (!host.contains("://")) {
// prefix with http
host = "http://" + host;
}
if (!host.endsWith("/")) {
// make sure we can safely resolves sub paths and not replace parent folders
host = host + "/";
}
URL hostUrl = new URL(host);
if (hostUrl.getPort() == -1) {
// url has no port, default to 9200 - sadly we need to rebuild..
StringBuilder newUrl = new StringBuilder(hostUrl.getProtocol() + "://");
newUrl.append(hostUrl.getHost()).append(":9200").append(hostUrl.toURI().getPath());
hostUrl = new URL(newUrl.toString());
}
return new URL(hostUrl, path);
}
}

View File

@ -0,0 +1,227 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Objects;
/**
* {@code HttpHostBuilder} creates an {@link HttpHost} meant to be used with an Elasticsearch cluster. The {@code HttpHostBuilder} uses
* defaults that are most common for Elasticsearch, including an unspecified port defaulting to <code>9200</code> and the default scheme
* being <code>http</code> (as opposed to <code>https</code>).
* <p>
* The only <em>required</em> detail is the host to connect to, either via hostname or IP address.
* <p>
* This enables you to create an {@code HttpHost} directly via a builder mechanism, or indirectly by parsing a URI-like string. For example:
* <pre><code>
* HttpHost host1 = HttpHostBuilder.builder("localhost").build(); // http://localhost:9200
* HttpHost host2 = HttpHostBuilder.builder("localhost:9200").build(); // http://localhost:9200
* HttpHost host4 = HttpHostBuilder.builder("http://localhost:9200").build(); // http://localhost:9200
* HttpHost host5 = HttpHostBuilder.builder("https://localhost:9200").build(); // https://localhost:9200
* HttpHost host6 = HttpHostBuilder.builder("https://127.0.0.1:9200").build(); // https://127.0.0.1:9200 (IPv4 localhost)
* HttpHost host7 = HttpHostBuilder.builder("http://10.1.2.3").build(); // http://10.1.2.3:9200
* HttpHost host8 = HttpHostBuilder.builder("https://[::1]").build(); // https://[::1]:9200 (IPv6 localhost)
* HttpHost host9 = HttpHostBuilder.builder("https://[::1]:9200").build(); // https://[::1]:9200 (IPv6 localhost)
* HttpHost host10= HttpHostBuilder.builder("https://sub.domain").build(); // https://sub.domain:9200
* </code></pre>
* Note: {@code HttpHost}s are the mechanism that the {@link RestClient} uses to build the base request. If you need to specify proxy
* settings, then use the {@link RestClientBuilder.RequestConfigCallback} to configure the {@code Proxy} settings.
*
* @see #builder(String)
* @see #builder()
*/
public class HttpHostBuilder {
/**
* The scheme used to connect to Elasticsearch.
*/
private Scheme scheme = Scheme.HTTP;
/**
* The host is the only required portion of the supplied URI when building it. The rest can be defaulted.
*/
private String host = null;
/**
* The port used to connect to Elasticsearch.
* <p>
* The default port is 9200 when unset.
*/
private int port = -1;
/**
* Create an empty {@link HttpHostBuilder}.
* <p>
* The expectation is that you then explicitly build the {@link HttpHost} piece-by-piece.
* <p>
* For example:
* <pre><code>
* HttpHost localhost = HttpHostBuilder.builder().host("localhost").build(); // http://localhost:9200
* HttpHost explicitLocalhost = HttpHostBuilder.builder().scheme(Scheme.HTTP).host("localhost").port(9200).build();
* // http://localhost:9200
* HttpHost secureLocalhost = HttpHostBuilder.builder().scheme(Scheme.HTTPS).host("localhost").build(); // https://localhost:9200
* HttpHost differentPort = HttpHostBuilder.builder().host("my_host").port(19200).build(); // http://my_host:19200
* HttpHost ipBased = HttpHostBuilder.builder().host("192.168.0.11").port(80).build(); // http://192.168.0.11:80
* </code></pre>
*
* @return Never {@code null}.
*/
public static HttpHostBuilder builder() {
return new HttpHostBuilder();
}
/**
* Create a new {@link HttpHostBuilder} from a URI-like string (e.g., "localhost" or "https://localhost:9200").
* <p>
* Any part of the URI that is not specified is defaulted: the scheme to <code>http</code> and the port to <code>9200</code>.
* <p>
* For example:
* <pre><code>
* HttpHost localhost = HttpHostBuilder.builder("localhost").build(); // http://localhost:9200
* HttpHost explicitLocalhost = HttpHostBuilder.builder("http://localhost:9200").build(); // http://localhost:9200
* HttpHost secureLocalhost = HttpHostBuilder.builder("https://localhost").build(); // https://localhost:9200
* HttpHost differentPort = HttpHostBuilder.builder("my_host:19200").build(); // http://my_host:19200
* HttpHost ipBased = HttpHostBuilder.builder("192.168.0.11:80").build(); // http://192.168.0.11:80
* </code></pre>
*
* @return Never {@code null}.
* @throws NullPointerException if {@code uri} is {@code null}.
* @throws IllegalArgumentException if any issue occurs while parsing the {@code uri}.
*/
public static HttpHostBuilder builder(final String uri) {
return new HttpHostBuilder(uri);
}
/**
* Create a new {@link HttpHostBuilder} with every field left in its default state.
*/
HttpHostBuilder() {
// everything is in the default state
}
/**
* Create a new {@link HttpHostBuilder} based on the supplied URI-like string.
*
* @param uri The [partial] URI used to build.
* @throws NullPointerException if {@code uri} is {@code null}.
* @throws IllegalArgumentException if any issue occurs while parsing the {@code uri}.
*/
HttpHostBuilder(final String uri) {
Objects.requireNonNull(uri, "uri must not be null");
try {
String cleanedUri = uri;
if (uri.contains("://") == false) {
cleanedUri = "http://" + uri;
}
final URI parsedUri = new URI(cleanedUri);
// "localhost:9200" doesn't have a scheme
if (parsedUri.getScheme() != null) {
scheme(Scheme.fromString(parsedUri.getScheme()));
}
if (parsedUri.getHost() != null) {
host(parsedUri.getHost());
} else {
// if the host is null, then it means one of two things: we're in a broken state _or_ it had something like underscores
// we want the raw form so that parts of the URI are not decoded
final String host = parsedUri.getRawAuthority();
// they explicitly provided the port, which is unparsed when the host is null
if (host.contains(":")) {
final String[] hostPort = host.split(":", 2);
host(hostPort[0]);
port(Integer.parseInt(hostPort[1]));
} else {
host(host);
}
}
if (parsedUri.getPort() != -1) {
port(parsedUri.getPort());
}
// fail for proxies
if (parsedUri.getRawPath() != null && parsedUri.getRawPath().isEmpty() == false) {
throw new IllegalArgumentException(
"HttpHosts do not use paths [" + parsedUri.getRawPath() +
"]. see setRequestConfigCallback for proxies. value: [" + uri + "]");
}
} catch (URISyntaxException | IndexOutOfBoundsException | NullPointerException e) {
throw new IllegalArgumentException("error parsing host: [" + uri + "]", e);
}
}
/**
* Set the scheme (aka protocol) for the {@link HttpHost}.
*
* @param scheme The scheme to use.
* @return Always {@code this}.
* @throws NullPointerException if {@code scheme} is {@code null}.
*/
public HttpHostBuilder scheme(final Scheme scheme) {
this.scheme = Objects.requireNonNull(scheme);
return this;
}
/**
* Set the host for the {@link HttpHost}.
* <p>
* This does not attempt to parse the {@code host} in any way.
*
* @param host The host to use.
* @return Always {@code this}.
* @throws NullPointerException if {@code host} is {@code null}.
*/
public HttpHostBuilder host(final String host) {
this.host = Objects.requireNonNull(host);
return this;
}
/**
* Set the port for the {@link HttpHost}.
* <p>
* Specifying the {@code port} as -1 will cause it to be defaulted to 9200 when the {@code HttpHost} is built.
*
* @param port The port to use.
* @return Always {@code this}.
* @throws IllegalArgumentException if the {@code port} is not -1 or [1, 65535].
*/
public HttpHostBuilder port(final int port) {
// setting a port to 0 makes no sense when you're the client; -1 allows us to use the default when we build
if (port != -1 && (port < 1 || port > 65535)) {
throw new IllegalArgumentException("port must be -1 for the default or [1, 65535]. was: " + port);
}
this.port = port;
return this;
}
/**
* Create a new {@link HttpHost} from the current {@code scheme}, {@code host}, and {@code port}.
*
* @return Never {@code null}.
* @throws IllegalStateException if {@code host} is unset.
*/
public HttpHost build() {
if (host == null) {
throw new IllegalStateException("host must be set");
}
return new HttpHost(host, port == -1 ? 9200 : port, scheme.toString());
}
}

View File

@ -0,0 +1,172 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.client.RestClient;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;
/**
* An {@code HttpResource} is some "thing" that needs to exist on the other side. If it does not exist, then follow-on actions cannot
* occur.
* <p>
* {@code HttpResource}s can assume that, as long as the connection stays active, then a verified resource should continue to exist on the
* other side.
*
* @see MultiHttpResource
* @see PublishableHttpResource
*/
public abstract class HttpResource {
/**
* The current state of the {@link HttpResource}.
*/
enum State {
/**
* The resource is ready to use.
*/
CLEAN,
/**
* The resource is being checked right now to see if it can be used.
*/
CHECKING,
/**
* The resource needs to be checked before it can be used.
*/
DIRTY
}
/**
* The user-recognizable name for whatever owns this {@link HttpResource}.
*/
protected final String resourceOwnerName;
/**
* The current state of the resource, which helps to determine if it needs to be checked.
*/
protected final AtomicReference<State> state;
/**
* Create a new {@link HttpResource} that {@linkplain #isDirty() is dirty}.
*
* @param resourceOwnerName The user-recognizable name
*/
protected HttpResource(final String resourceOwnerName) {
this(resourceOwnerName, true);
}
/**
* Create a new {@link HttpResource} that is {@code dirty}.
*
* @param resourceOwnerName The user-recognizable name
* @param dirty Whether the resource is dirty or not
*/
protected HttpResource(final String resourceOwnerName, final boolean dirty) {
this.resourceOwnerName = Objects.requireNonNull(resourceOwnerName);
this.state = new AtomicReference<>(dirty ? State.DIRTY : State.CLEAN);
}
/**
* Get the resource owner for this {@link HttpResource}.
*
* @return Never {@code null}.
*/
public String getResourceOwnerName() {
return resourceOwnerName;
}
/**
* Determine if the resource needs to be checked.
*
* @return {@code true} to indicate that the resource should block follow-on actions that require it.
* @see #checkAndPublish(RestClient)
*/
public boolean isDirty() {
return state.get() != State.CLEAN;
}
/**
* Mark the resource as {@linkplain #isDirty() dirty}.
*/
public final void markDirty() {
state.compareAndSet(State.CLEAN, State.DIRTY);
}
/**
* If the resource is currently {@linkplain #isDirty() dirty}, then check and, if necessary, publish this {@link HttpResource}.
* <p>
* Expected usage:
* <pre><code>
* if (resource.checkAndPublishIfDirty(client)) {
* // use client with resources having been verified
* }
* </code></pre>
*
* @param client The REST client to make the request(s).
* @return {@code true} if the resource is available for use. {@code false} to stop.
*/
public final boolean checkAndPublishIfDirty(final RestClient client) {
final State state = this.state.get();
// get in line and wait until the check passes or fails if it's checking now, or start checking
return state == State.CLEAN || blockUntilCheckAndPublish(client);
}
/**
* Invoked by {@link #checkAndPublishIfDirty(RestClient)} to block in case {@link #checkAndPublish(RestClient)} is in the middle of
* {@linkplain State#CHECKING checking}.
* <p>
* Unlike {@link #isDirty()} and {@link #checkAndPublishIfDirty(RestClient)}, this is {@code synchronized} in order to prevent
* double-execution and it invokes {@link #checkAndPublish(RestClient)} if it's {@linkplain State#DIRTY dirty}.
*
* @param client The REST client to make the request(s).
* @return {@code true} if the resource is available for use. {@code false} to stop.
*/
private synchronized boolean blockUntilCheckAndPublish(final RestClient client) {
final State state = this.state.get();
return state == State.CLEAN || (state == State.DIRTY && checkAndPublish(client));
}
/**
* Check and, if necessary, publish this {@link HttpResource}.
* <p>
* This will perform the check regardless of the {@linkplain #isDirty() dirtiness} and it will update the dirtiness.
* Using this directly can be useful if there is ever a need to double-check dirtiness without having to {@linkplain #markDirty() mark}
* it as dirty.
*
* @param client The REST client to make the request(s).
* @return {@code true} if the resource is available for use. {@code false} to stop.
* @see #isDirty()
*/
public final synchronized boolean checkAndPublish(final RestClient client) {
// we always check when asked, regardless of clean or dirty
state.set(State.CHECKING);
boolean success = false;
try {
success = doCheckAndPublish(client);
} finally {
// nothing else should be unsetting from CHECKING
assert state.get() == State.CHECKING;
state.set(success ? State.CLEAN : State.DIRTY);
}
return success;
}
/**
* Perform whatever is necessary to check and publish this {@link HttpResource}.
*
* @param client The REST client to make the request(s).
* @return {@code true} if the resource is available for use. {@code false} to stop.
*/
protected abstract boolean doCheckAndPublish(final RestClient client);
}
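A minimal sketch of the extension point (this class is hypothetical, not part of the PR): a subclass only needs to implement doCheckAndPublish, and the base class handles the dirty-state machine:

// hypothetical example of a resource that is always considered present
public class AlwaysPresentHttpResource extends HttpResource {

    public AlwaysPresentHttpResource(final String resourceOwnerName) {
        super(resourceOwnerName);
    }

    @Override
    protected boolean doCheckAndPublish(final RestClient client) {
        // report the remote resource as usable without making any request
        return true;
    }
}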

View File

@ -0,0 +1,75 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.logging.Loggers;
import java.util.Collections;
import java.util.List;
/**
* {@code MultiHttpResource} serves as a wrapper of a {@link List} of {@link HttpResource}s.
* <p>
* By telling the {@code MultiHttpResource} to become dirty, it effectively marks all of its sub-resources dirty as well.
* <p>
* Sub-resources should be the sole responsibility of the {@code MultiHttpResource}; nothing else should use them directly
* if they are included in a {@code MultiHttpResource}.
*/
public class MultiHttpResource extends HttpResource {
private static final Logger logger = Loggers.getLogger(MultiHttpResource.class);
/**
* Sub-resources that are grouped to simplify notification.
*/
private final List<HttpResource> resources;
/**
* Create a {@link MultiHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param resources The sub-resources to aggregate.
*/
public MultiHttpResource(final String resourceOwnerName, final List<? extends HttpResource> resources) {
super(resourceOwnerName);
this.resources = Collections.unmodifiableList(resources);
}
/**
* Get the resources that are checked by this {@link MultiHttpResource}.
*
* @return Never {@code null}.
*/
public List<HttpResource> getResources() {
return resources;
}
/**
* Check and publish all {@linkplain #resources sub-resources}.
*/
@Override
protected boolean doCheckAndPublish(RestClient client) {
logger.trace("checking sub-resources existence and publishing on the [{}]", resourceOwnerName);
boolean exists = true;
// short-circuits on the first failure, thus marking the whole thing dirty
for (final HttpResource resource : resources) {
if (resource.checkAndPublish(client) == false) {
exists = false;
break;
}
}
logger.trace("all sub-resources exist [{}] on the [{}]", exists, resourceOwnerName);
return exists;
}
}
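As a sketch of how the exporter is expected to compose its checks (the resource names, suppliers, timeout, and client below are placeholders), grouping the template and pipeline resources means a single dirty flag gates all follow-on bulk requests:

// illustrative wiring; the exporter assembles its own resource list
List<HttpResource> resources = Arrays.asList(
        new TemplateHttpResource("_http", masterTimeout, ".monitoring-es-2", templateSupplier),
        new PipelineHttpResource("_http", masterTimeout, "xpack_monitoring_2", pipelineSupplier));
HttpResource allResources = new MultiHttpResource("_http", resources);

if (allResources.checkAndPublishIfDirty(client)) {
    // safe to export documents to the monitoring cluster
}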

View File

@ -0,0 +1,93 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
/**
* {@code NodeFailureListener} logs warnings for any node failure, but it can also notify a {@link Sniffer} and/or {@link HttpResource}
* upon failures as well.
* <p>
* The {@linkplain #setSniffer(Sniffer) sniffer} and {@linkplain #setResource(HttpResource) resource} are expected to be set immediately
* or not at all.
*/
class NodeFailureListener extends RestClient.FailureListener {
private static final Logger logger = Loggers.getLogger(NodeFailureListener.class);
/**
* The optional {@link Sniffer} associated with the {@link RestClient}.
*/
@Nullable
private SetOnce<Sniffer> sniffer = new SetOnce<>();
/**
* The optional {@link HttpResource} associated with the {@link RestClient}.
*/
@Nullable
private SetOnce<HttpResource> resource = new SetOnce<>();
/**
* Get the {@link Sniffer} that is notified upon node failure.
*
* @return Can be {@code null}.
*/
@Nullable
public Sniffer getSniffer() {
return sniffer.get();
}
/**
* Set the {@link Sniffer} that is notified upon node failure.
*
* @param sniffer The sniffer to notify
* @throws SetOnce.AlreadySetException if called more than once
*/
public void setSniffer(@Nullable final Sniffer sniffer) {
this.sniffer.set(sniffer);
}
/**
* Get the {@link HttpResource} that is notified upon node failure.
*
* @return Can be {@code null}.
*/
@Nullable
public HttpResource getResource() {
return resource.get();
}
/**
* Set the {@link HttpResource} that is notified upon node failure.
*
* @param resource The resource to notify
* @throws SetOnce.AlreadySetException if called more than once
*/
public void setResource(@Nullable final HttpResource resource) {
this.resource.set(resource);
}
@Override
public void onFailure(final HttpHost host) {
logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort());
final HttpResource resource = this.resource.get();
final Sniffer sniffer = this.sniffer.get();
if (resource != null) {
resource.markDirty();
}
if (sniffer != null) {
sniffer.sniffOnFailure(host);
}
}
}
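The expected wiring looks roughly like the following sketch (host values are illustrative): create the listener before the client, then attach the sniffer once it exists, since both setters may only be called once:

NodeFailureListener failureListener = new NodeFailureListener();
RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http"))
        .setFailureListener(failureListener)
        .build();
Sniffer sniffer = Sniffer.builder(client).build();
failureListener.setSniffer(sniffer); // SetOnce: a second call would throw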

View File

@ -0,0 +1,84 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Objects;
import java.util.function.Supplier;
/**
* {@code PipelineHttpResource}s allow the checking and uploading of ingest pipelines to a remote cluster.
* <p>
* In the future, we will need to also support the transformation or replacement of pipelines based on their version, but we do not need
* that functionality until some breaking change in the Monitoring API requires it.
*/
public class PipelineHttpResource extends PublishableHttpResource {
private static final Logger logger = Loggers.getLogger(PipelineHttpResource.class);
/**
* The name of the pipeline that is sent to the remote cluster.
*/
private final String pipelineName;
/**
* Provides a fully formed pipeline (e.g., no variables that need to be replaced).
*/
private final Supplier<byte[]> pipeline;
/**
* Create a new {@link PipelineHttpResource}.
*
* @param resourceOwnerName The user-recognizable name
* @param masterTimeout Master timeout to use with any request.
* @param pipelineName The name of the pipeline (e.g., "pipeline123").
* @param pipeline The pipeline provider.
*/
public PipelineHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
final String pipelineName, final Supplier<byte[]> pipeline) {
super(resourceOwnerName, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);
this.pipelineName = Objects.requireNonNull(pipelineName);
this.pipeline = Objects.requireNonNull(pipeline);
}
/**
* Determine if the current {@linkplain #pipelineName pipeline} exists.
*/
@Override
protected CheckResponse doCheck(final RestClient client) {
return checkForResource(client, logger,
"/_ingest/pipeline", pipelineName, "monitoring pipeline",
resourceOwnerName, "monitoring cluster");
}
/**
* Publish the current {@linkplain #pipelineName pipeline}.
*/
@Override
protected boolean doPublish(final RestClient client) {
return putResource(client, logger,
"/_ingest/pipeline", pipelineName, this::pipelineToHttpEntity, "monitoring pipeline",
resourceOwnerName, "monitoring cluster");
}
/**
* Create a {@link HttpEntity} for the {@link #pipeline}.
*
* @return Never {@code null}.
*/
HttpEntity pipelineToHttpEntity() {
return new ByteArrayEntity(pipeline.get(), ContentType.APPLICATION_JSON);
}
}

View File

@ -0,0 +1,257 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked <em>and</em>
* published in the event that the check does not pass.
*
* @see #doCheck(RestClient)
* @see #doPublish(RestClient)
*/
public abstract class PublishableHttpResource extends HttpResource {
/**
* {@code CheckResponse} provides a ternary state for {@link #doCheck(RestClient)}.
*/
public enum CheckResponse {
/**
* The check found the resource, so nothing needs to be published.
*/
EXISTS,
/**
* The check did not find the resource, so we need to attempt to publish it.
*/
DOES_NOT_EXIST,
/**
* The check hit an unexpected exception that should block publishing attempts until it can check again.
*/
ERROR
}
/**
* A value that will never match anything in the JSON response body, thus limiting it to "{}".
*/
public static final String FILTER_PATH_NONE = "$NONE";
/**
* Use this to avoid getting any JSON response from a request.
*/
public static final Map<String, String> NO_BODY_PARAMETERS = Collections.singletonMap("filter_path", FILTER_PATH_NONE);
/**
* The default parameters to use for any request.
*/
protected final Map<String, String> parameters;
/**
* Create a new {@link PublishableHttpResource} that {@linkplain #isDirty() is dirty}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
* @param baseParameters The base parameters to specify for the request.
*/
protected PublishableHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
final Map<String, String> baseParameters) {
this(resourceOwnerName, masterTimeout, baseParameters, true);
}
/**
* Create a new {@link PublishableHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
* @param baseParameters The base parameters to specify for the request.
* @param dirty Whether the resource is dirty or not
*/
protected PublishableHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
final Map<String, String> baseParameters, final boolean dirty) {
super(resourceOwnerName, dirty);
if (masterTimeout != null) {
final Map<String, String> parameters = new HashMap<>(baseParameters.size() + 1);
parameters.putAll(baseParameters);
parameters.put("master_timeout", masterTimeout.toString());
this.parameters = Collections.unmodifiableMap(parameters);
} else {
this.parameters = baseParameters;
}
}
/**
* Get the default parameters to use with every request.
*
* @return Never {@code null}.
*/
public Map<String, String> getParameters() {
return parameters;
}
/**
* Perform whatever is necessary to check and publish this {@link PublishableHttpResource}.
*
* @param client The REST client to make the request(s).
* @return {@code true} if the resource is available for use. {@code false} to stop.
*/
@Override
protected final boolean doCheckAndPublish(final RestClient client) {
final CheckResponse check = doCheck(client);
// errors cause a dead-stop
return check != CheckResponse.ERROR && (check == CheckResponse.EXISTS || doPublish(client));
}
/**
* Determine if the current resource exists.
* <ul>
* <li>
* {@link CheckResponse#EXISTS EXISTS} will <em>not</em> run {@link #doPublish(RestClient)} and mark this as <em>not</em> dirty.
* </li>
* <li>
* {@link CheckResponse#DOES_NOT_EXIST DOES_NOT_EXIST} will run {@link #doPublish(RestClient)}, which determines the dirtiness.
* </li>
* <li>{@link CheckResponse#ERROR ERROR} will <em>not</em> run {@link #doPublish(RestClient)} and mark this as dirty.</li>
* </ul>
*
* @param client The REST client to make the request(s).
* @return Never {@code null}.
*/
protected abstract CheckResponse doCheck(final RestClient client);
/**
* Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint.
* <p>
* This provides the base-level check for any resource that does not need to inspect its actual contents.
*
* @param client The REST client to make the request(s).
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
* @param resourceName The name of the resource (e.g., "template123").
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
* @return Never {@code null}.
*/
protected CheckResponse checkForResource(final RestClient client, final Logger logger,
final String resourceBasePath,
final String resourceName, final String resourceType,
final String resourceOwnerName, final String resourceOwnerType) {
logger.trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
try {
final Response response = client.performRequest("GET", resourceBasePath + "/" + resourceName, parameters);
// we don't currently check for the content because we always expect it to be the same;
// if we ever make a BWC change to any template (thus without renaming it), then we need to check the content!
if (response.getStatusLine().getStatusCode() == RestStatus.OK.getStatus()) {
logger.debug("{} [{}] found on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
return CheckResponse.EXISTS;
} else {
throw new ResponseException(response);
}
} catch (final ResponseException e) {
final int statusCode = e.getResponse().getStatusLine().getStatusCode();
// 404
if (statusCode == RestStatus.NOT_FOUND.getStatus()) {
logger.debug("{} [{}] does not exist on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
return CheckResponse.DOES_NOT_EXIST;
} else {
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to verify {} [{}] on the [{}] {} with status code [{}]",
resourceType, resourceName, resourceOwnerName, resourceOwnerType, statusCode),
e);
// weirder failure than below; block responses just like other unexpected failures
return CheckResponse.ERROR;
}
} catch (IOException | RuntimeException e) {
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to verify {} [{}] on the [{}] {}",
resourceType, resourceName, resourceOwnerName, resourceOwnerType),
e);
// do not attempt to publish the resource because we're in a broken state
return CheckResponse.ERROR;
}
}
/**
* Publish the current resource.
* <p>
* This is only invoked if {@linkplain #doCheck(RestClient) the check} fails.
*
* @param client The REST client to make the request(s).
* @return {@code true} if it exists.
*/
protected abstract boolean doPublish(final RestClient client);
/**
* Upload the {@code resourceName} to the {@code resourceBasePath} endpoint.
*
* @param client The REST client to make the request(s).
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
* @param resourceName The name of the resource (e.g., "template123").
* @param body The {@link HttpEntity} that makes up the body of the request.
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
*/
protected boolean putResource(final RestClient client, final Logger logger,
final String resourceBasePath,
final String resourceName, final java.util.function.Supplier<HttpEntity> body,
final String resourceType,
final String resourceOwnerName, final String resourceOwnerType) {
logger.trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
boolean success = false;
try {
final Response response = client.performRequest("PUT", resourceBasePath + "/" + resourceName, parameters, body.get());
final int statusCode = response.getStatusLine().getStatusCode();
// 200 or 201
if (statusCode == RestStatus.OK.getStatus() || statusCode == RestStatus.CREATED.getStatus()) {
logger.debug("{} [{}] uploaded to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
success = true;
} else {
throw new RuntimeException("[" + resourceBasePath + "/" + resourceName + "] responded with [" + statusCode + "]");
}
} catch (IOException | RuntimeException e) {
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to upload {} [{}] on the [{}] {}",
resourceType, resourceName, resourceOwnerName, resourceOwnerType),
e);
}
return success;
}
}
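Concretely, for a template-like resource the check / publish pair boils down to two requests (the path and template name shown here are for illustration); the filter_path parameter from NO_BODY_PARAMETERS keeps the GET response body empty:

// check: 200 => EXISTS, 404 => DOES_NOT_EXIST, anything else => ERROR
Response check = client.performRequest("GET", "/_template/.monitoring-es-2", parameters);
// publish: only attempted when the check returned DOES_NOT_EXIST
Response put = client.performRequest("PUT", "/_template/.monitoring-es-2", parameters, templateEntity);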

View File

@ -0,0 +1,65 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.client.RestClient;
import java.util.Locale;
/**
* {@code Scheme} provides the list of supported {@code URI} schemes (aka protocols) for working with Elasticsearch via the
* {@link RestClient}.
*
* @see HttpHostBuilder
*/
public enum Scheme {
/**
* HTTP is the default {@linkplain Scheme scheme} used by Elasticsearch.
*/
HTTP("http"),
/**
* HTTPS is the secure form of {@linkplain #HTTP http}, which requires that Elasticsearch be using X-Pack Security with TLS/SSL or
* a similar securing mechanism.
*/
HTTPS("https");
private final String scheme;
Scheme(final String scheme) {
this.scheme = scheme;
}
@Override
public String toString() {
return scheme;
}
/**
* Determine the {@link Scheme} from the {@code scheme}.
* <pre><code>
* Scheme http = Scheme.fromString("http");
* Scheme https = Scheme.fromString("https");
* Scheme httpsCaps = Scheme.fromString("HTTPS"); // same as https
* </code></pre>
*
* @param scheme The scheme to check.
* @return Never {@code null}.
* @throws NullPointerException if {@code scheme} is {@code null}.
* @throws IllegalArgumentException if the {@code scheme} is not supported.
*/
public static Scheme fromString(final String scheme) {
switch (scheme.toLowerCase(Locale.ROOT)) {
case "http":
return HTTP;
case "https":
return HTTPS;
}
throw new IllegalArgumentException("unsupported scheme: [" + scheme + "]");
}
}

View File

@ -0,0 +1,87 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import java.util.Objects;
/**
* {@code SecurityHttpClientConfigCallback} configures a {@link RestClient} for user authentication and SSL / TLS.
*/
class SecurityHttpClientConfigCallback implements RestClientBuilder.HttpClientConfigCallback {
/**
* The optional {@link CredentialsProvider} for all requests to enable user authentication.
*/
@Nullable
private final CredentialsProvider credentialsProvider;
/**
* The {@link SSLIOSessionStrategy} for all requests to enable SSL / TLS encryption.
*/
private final SSLIOSessionStrategy sslStrategy;
/**
* Create a new {@link SecurityHttpClientConfigCallback}.
*
* @param credentialsProvider The credential provider, if a username/password have been supplied
* @param sslStrategy The SSL strategy, if SSL / TLS have been supplied
* @throws NullPointerException if {@code sslStrategy} is {@code null}
*/
SecurityHttpClientConfigCallback(final SSLIOSessionStrategy sslStrategy,
@Nullable final CredentialsProvider credentialsProvider) {
this.sslStrategy = Objects.requireNonNull(sslStrategy);
this.credentialsProvider = credentialsProvider;
}
/**
* Get the {@link CredentialsProvider} that will be added to the HTTP client.
*
* @return Can be {@code null}.
*/
@Nullable
CredentialsProvider getCredentialsProvider() {
return credentialsProvider;
}
/**
* Get the {@link SSLIOSessionStrategy} that will be added to the HTTP client.
*
* @return Never {@code null}.
*/
SSLIOSessionStrategy getSSLStrategy() {
return sslStrategy;
}
/**
* Sets the {@linkplain HttpAsyncClientBuilder#setDefaultCredentialsProvider(CredentialsProvider) credential provider},
* {@linkplain HttpAsyncClientBuilder#setSSLContext(SSLContext) SSL context}, and
* {@linkplain HttpAsyncClientBuilder#setSSLHostnameVerifier(HostnameVerifier) SSL Hostname Verifier}.
*
* @param httpClientBuilder The client to configure.
* @return Always {@code httpClientBuilder}.
*/
@Override
public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) {
// enable SSL / TLS
httpClientBuilder.setSSLStrategy(sslStrategy);
// enable user authentication
if (credentialsProvider != null) {
httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
}
return httpClientBuilder;
}
}
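A hedged example of attaching this callback (the credentials, SSLContext, and hostname verifier are placeholders supplied elsewhere, e.g., by the SSLService):

CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
        new UsernamePasswordCredentials("monitoring_user", "changeme"));
SSLIOSessionStrategy sslStrategy = new SSLIOSessionStrategy(sslContext, hostnameVerifier);

RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "https"))
        .setHttpClientConfigCallback(new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider))
        .build();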

View File

@ -0,0 +1,85 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Objects;
import java.util.function.Supplier;
/**
* {@code TemplateHttpResource}s allow the checking and uploading of templates to a remote cluster.
* <p>
* There is currently no need to check the response body of the template for consistency, but if we ever make a backwards-compatible change
* that requires the template to be replaced, then we will need to check for <em>something</em> in the body in order to see if we need to
* replace the existing template(s).
*/
public class TemplateHttpResource extends PublishableHttpResource {
private static final Logger logger = Loggers.getLogger(TemplateHttpResource.class);
/**
* The name of the template that is sent to the remote cluster.
*/
private final String templateName;
/**
* Provides a fully formed template (e.g., no variables that need to be replaced).
*/
private final Supplier<String> template;
/**
* Create a new {@link TemplateHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
* @param templateName The name of the template (e.g., ".template123").
* @param template The template provider.
*/
public TemplateHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
final String templateName, final Supplier<String> template) {
super(resourceOwnerName, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);
this.templateName = Objects.requireNonNull(templateName);
this.template = Objects.requireNonNull(template);
}
/**
* Determine if the current {@linkplain #templateName template} exists.
*/
@Override
protected CheckResponse doCheck(final RestClient client) {
return checkForResource(client, logger,
"/_template", templateName, "monitoring template",
resourceOwnerName, "monitoring cluster");
}
/**
* Publish the missing {@linkplain #templateName template}.
*/
@Override
protected boolean doPublish(final RestClient client) {
return putResource(client, logger,
"/_template", templateName, this::templateToHttpEntity, "monitoring template",
resourceOwnerName, "monitoring cluster");
}
/**
* Create a {@link HttpEntity} for the {@link #template}.
*
* @return Never {@code null}.
*/
HttpEntity templateToHttpEntity() {
return new StringEntity(template.get(), ContentType.APPLICATION_JSON);
}
}
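
(Not part of this change: a minimal sketch of creating and publishing a template resource by hand. The template name and JSON body are placeholders; the exporter supplies the real templates from its resolvers.)

package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.unit.TimeValue;

public class TemplateResourceUsageSketch {
    public static void main(String[] args) throws Exception {
        // placeholder template body
        final String templateJson = "{\"template\":\".monitoring-example-*\",\"settings\":{\"index.number_of_shards\":1}}";

        final TemplateHttpResource template =
                new TemplateHttpResource("_http_sketch", TimeValue.timeValueSeconds(30), ".monitoring-example", () -> templateJson);

        try (RestClient client = RestClient.builder(new HttpHost("monitoring.example.org", 9200)).build()) {
            // GETs /_template/.monitoring-example and PUTs the body above only if it is missing
            template.checkAndPublish(client);
        }
    }
}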

View File

@ -0,0 +1,74 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.config.RequestConfig.Builder;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
/**
* {@code TimeoutRequestConfigCallback} enables the setting of connection-related timeouts for HTTP requests.
*/
class TimeoutRequestConfigCallback implements RestClientBuilder.RequestConfigCallback {
@Nullable
private final TimeValue connectTimeout;
@Nullable
private final TimeValue socketTimeout;
/**
* Create a new {@link TimeoutRequestConfigCallback}.
*
* @param connectTimeout The initial connection timeout, if any is supplied
* @param socketTimeout The socket timeout, if any is supplied
*/
TimeoutRequestConfigCallback(@Nullable final TimeValue connectTimeout, @Nullable final TimeValue socketTimeout) {
assert connectTimeout != null || socketTimeout != null : "pointless to use with defaults";
this.connectTimeout = connectTimeout;
this.socketTimeout = socketTimeout;
}
/**
* Get the initial connection timeout.
*
* @return Can be {@code null} for default (1 second).
*/
@Nullable
TimeValue getConnectTimeout() {
return connectTimeout;
}
/**
* Get the socket timeout.
*
* @return Can be {@code null} for default (10 seconds).
*/
@Nullable
TimeValue getSocketTimeout() {
return socketTimeout;
}
/**
* Sets the {@linkplain Builder#setConnectTimeout(int) connect timeout} and {@linkplain Builder#setSocketTimeout(int) socket timeout}.
*
* @param requestConfigBuilder The request to configure.
* @return Always {@code requestConfigBuilder}.
*/
@Override
public Builder customizeRequestConfig(Builder requestConfigBuilder) {
if (connectTimeout != null) {
requestConfigBuilder.setConnectTimeout((int)connectTimeout.millis());
}
if (socketTimeout != null) {
requestConfigBuilder.setSocketTimeout((int)socketTimeout.millis());
}
return requestConfigBuilder;
}
}
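
(Not part of this change: a minimal sketch of registering the callback on a RestClientBuilder. The timeout values are illustrative; the exporter reads them from its connection settings.)

package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.unit.TimeValue;

public class TimeoutCallbackUsageSketch {
    public static void main(String[] args) throws Exception {
        // illustrative timeouts only
        final TimeoutRequestConfigCallback timeouts =
                new TimeoutRequestConfigCallback(TimeValue.timeValueSeconds(5), TimeValue.timeValueMinutes(1));

        final RestClient client = RestClient.builder(new HttpHost("monitoring.example.org", 9200))
                .setRequestConfigCallback(timeouts)
                .build();
        client.close();
    }
}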

View File

@ -0,0 +1,105 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
* {@code VersionHttpResource} verifies that the returned {@link Version} of Elasticsearch is at least the specified minimum version.
*/
public class VersionHttpResource extends HttpResource {
private static final Logger logger = Loggers.getLogger(VersionHttpResource.class);
/**
* The parameters to pass with every version request to limit the output to just the version number.
*/
public static final Map<String, String> PARAMETERS = Collections.singletonMap("filter_path", "version.number");
/**
* The minimum supported version of Elasticsearch.
*/
private final Version minimumVersion;
/**
* Create a new {@link VersionHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param minimumVersion The minimum supported version of Elasticsearch.
*/
public VersionHttpResource(final String resourceOwnerName, final Version minimumVersion) {
super(resourceOwnerName);
this.minimumVersion = Objects.requireNonNull(minimumVersion);
}
/**
* Verify that the minimum {@link Version} is supported on the remote cluster.
* <p>
* If the version is not supported, then there is nothing that can be done except wait until it is. There is no publishing aspect to this operation.
*/
@Override
protected boolean doCheckAndPublish(final RestClient client) {
logger.trace("checking [{}] to ensure that it supports the minimum version [{}]", resourceOwnerName, minimumVersion);
try {
return validateVersion(client.performRequest("GET", "/", PARAMETERS));
} catch (IOException | RuntimeException e) {
logger.error(
(Supplier<?>)() ->
new ParameterizedMessage("failed to verify minimum version [{}] on the [{}] monitoring cluster",
minimumVersion, resourceOwnerName),
e);
}
return false;
}
/**
* Ensure that the {@code response} contains a {@link Version} that is {@linkplain Version#onOrAfter(Version) on or after} the
* {@link #minimumVersion}.
*
* @param response The response to parse.
* @return {@code true} if the remote cluster is running a supported version.
* @throws NullPointerException if the response is malformed.
* @throws ClassCastException if the response is malformed.
* @throws IOException if any parsing issue occurs.
*/
private boolean validateVersion(final Response response) throws IOException {
boolean supported = false;
try (final XContentParser parser = XContentType.JSON.xContent().createParser(response.getEntity().getContent())) {
// the response should be filtered to just '{"version":{"number":"xyz"}}', so this is cheap and guaranteed
@SuppressWarnings("unchecked")
final String versionNumber = (String)((Map<String, Object>)parser.map().get("version")).get("number");
final Version version = Version.fromString(versionNumber);
if (version.onOrAfter(minimumVersion)) {
logger.debug("version [{}] >= [{}] and supported for [{}]", version, minimumVersion, resourceOwnerName);
supported = true;
} else {
logger.error("version [{}] < [{}] and NOT supported for [{}]", version, minimumVersion, resourceOwnerName);
}
}
return supported;
}
}
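
(Not part of this change: a minimal sketch of running the version check on its own. Version.CURRENT stands in for whatever minimum the exporter actually enforces, and the host is a placeholder.)

package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.client.RestClient;

public class VersionCheckUsageSketch {
    public static void main(String[] args) throws Exception {
        final VersionHttpResource versionCheck = new VersionHttpResource("_http_sketch", Version.CURRENT);

        try (RestClient client = RestClient.builder(new HttpHost("monitoring.example.org", 9200)).build()) {
            // issues GET /?filter_path=version.number and compares the reported version
            final boolean supported = versionCheck.checkAndPublish(client);
            System.out.println("remote cluster is supported: " + supported);
        }
    }
}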

View File

@ -7,6 +7,8 @@ package org.elasticsearch.xpack.monitoring.exporter.local;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
@ -23,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -54,6 +57,8 @@ import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString;
*/
public class LocalExporter extends Exporter implements ClusterStateListener, CleanerService.Listener {
private static final Logger logger = Loggers.getLogger(LocalExporter.class);
public static final String TYPE = "local";
private final InternalClient client;
@ -104,7 +109,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
@Override
public void doClose() {
if (state.getAndSet(State.TERMINATED) != State.TERMINATED) {
logger.debug("stopped");
logger.trace("stopped");
clusterService.remove(this);
cleanerService.remove(this);
}

View File

@ -1,47 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.support;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import java.nio.charset.Charset;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
*
*/
public final class VersionUtils {
public static final String VERSION_NUMBER_FIELD = "number";
private VersionUtils() {
}
public static Version parseVersion(byte[] text) {
return parseVersion(VERSION_NUMBER_FIELD, new String(text, Charset.forName("UTF-8")));
}
/**
* Extract &amp; parse the version contained in the given template
*/
public static Version parseVersion(String prefix, byte[] text) {
return parseVersion(prefix, new String(text, Charset.forName("UTF-8")));
}
public static Version parseVersion(String prefix, String text) {
Pattern pattern = Pattern.compile(prefix + "\"\\s*:\\s*\"?([0-9a-zA-Z\\.\\-]+)\"?");
Matcher matcher = pattern.matcher(text);
if (matcher.find()) {
String parsedVersion = matcher.group(1);
if (Strings.hasText(parsedVersion)) {
return Version.fromString(parsedVersion);
}
}
return null;
}
}

View File

@ -8,7 +8,6 @@ package org.elasticsearch.xpack.monitoring;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.is;
@ -21,7 +20,7 @@ public class MonitoringPluginClientTests extends ESTestCase {
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE)
.build();
Monitoring plugin = new Monitoring(settings, new Environment(settings), null);
Monitoring plugin = new Monitoring(settings, null);
assertThat(plugin.isEnabled(), is(true));
assertThat(plugin.isTransportClient(), is(true));
}
@ -32,7 +31,7 @@ public class MonitoringPluginClientTests extends ESTestCase {
.put("path.home", createTempDir())
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), "node")
.build();
Monitoring plugin = new Monitoring(settings, new Environment(settings), null);
Monitoring plugin = new Monitoring(settings, null);
assertThat(plugin.isEnabled(), is(true));
assertThat(plugin.isTransportClient(), is(false));
}

View File

@ -67,7 +67,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
doExporting();
logger.debug("--> templates does not exist: it should have been created in the current version");
for (String template : monitoringTemplates().keySet()) {
for (String template : monitoringTemplateNames()) {
assertTemplateExists(template);
}
assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);
@ -93,7 +93,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
assertTemplateExists(indexTemplateName());
logger.debug("--> existing templates are old: new templates should be created");
for (String template : monitoringTemplates().keySet()) {
for (String template : monitoringTemplateNames()) {
assertTemplateExists(template);
}
assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);
@ -115,7 +115,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
doExporting();
logger.debug("--> existing templates are up to date");
for (String template : monitoringTemplates().keySet()) {
for (String template : monitoringTemplateNames()) {
assertTemplateExists(template);
}
assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);

View File

@ -0,0 +1,219 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;
import java.io.IOException;
import java.util.Map;
import java.util.function.Predicate;
import static org.hamcrest.Matchers.is;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
* Base test helper for any {@link PublishableHttpResource}.
*/
public abstract class AbstractPublishableHttpResourceTestCase extends ESTestCase {
protected final String owner = getClass().getSimpleName();
@Nullable
protected final TimeValue masterTimeout = randomFrom(TimeValue.timeValueMinutes(5), null);
protected final RestClient client = mock(RestClient.class);
/**
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} and assert that it returns
* {@code CheckResponse.EXISTS} given a {@link RestStatus} that is {@link RestStatus#OK}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
*/
protected void assertCheckExists(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName)
throws IOException {
doCheckWithStatusCode(resource, resourceBasePath, resourceName, successfulCheckStatus(), CheckResponse.EXISTS);
}
/**
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} and assert that it returns
* {@code CheckResponse.DOES_NOT_EXIST} given a {@link RestStatus} that is {@link RestStatus#NOT_FOUND}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
*/
protected void assertCheckDoesNotExist(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName)
throws IOException {
doCheckWithStatusCode(resource, resourceBasePath, resourceName, notFoundCheckStatus(), CheckResponse.DOES_NOT_EXIST);
}
/**
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} that throws an exception and assert
* that it returns {@code CheckResponse.ERROR}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
*/
protected void assertCheckWithException(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName)
throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final ResponseException responseException = responseException("GET", endpoint, failedCheckStatus());
final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
when(client.performRequest("GET", endpoint, resource.getParameters())).thenThrow(e);
assertThat(resource.doCheck(client), is(CheckResponse.ERROR));
}
/**
* Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} and assert that it returns
* {@code true} given a {@link RestStatus} that is {@link RestStatus#OK} or {@link RestStatus#CREATED}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
* @param bodyType The request body provider's type.
*/
protected void assertPublishSucceeds(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
final Class<? extends HttpEntity> bodyType)
throws IOException {
doPublishWithStatusCode(resource, resourceBasePath, resourceName, bodyType, successfulPublishStatus(), true);
}
/**
* Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} and assert that it returns
* {@code false} given a {@link RestStatus} that is neither {@link RestStatus#OK} nor {@link RestStatus#CREATED}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
* @param bodyType The request body provider's type.
*/
protected void assertPublishFails(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
final Class<? extends HttpEntity> bodyType)
throws IOException {
doPublishWithStatusCode(resource, resourceBasePath, resourceName, bodyType, failedPublishStatus(), false);
}
/**
* Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} that throws an exception and assert
* that it returns {@code false}.
*
* @param resource The resource to execute.
* @param resourceBasePath The base endpoint (e.g., "/_template")
* @param resourceName The resource name (e.g., the template or pipeline name).
*/
protected void assertPublishWithException(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
final Class<? extends HttpEntity> bodyType)
throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"));
when(client.performRequest(eq("PUT"), eq(endpoint), eq(resource.getParameters()), any(bodyType))).thenThrow(e);
assertThat(resource.doPublish(client), is(false));
}
protected void assertParameters(final PublishableHttpResource resource) {
final Map<String, String> parameters = resource.getParameters();
if (masterTimeout != null) {
assertThat(parameters.get("master_timeout"), is(masterTimeout.toString()));
}
assertThat(parameters.get("filter_path"), is("$NONE"));
}
private void doCheckWithStatusCode(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
final RestStatus status,
final CheckResponse expected)
throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Response response = response("GET", endpoint, status);
when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);
assertThat(resource.doCheck(client), is(expected));
}
private void doPublishWithStatusCode(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
final Class<? extends HttpEntity> bodyType,
final RestStatus status,
final boolean expected)
throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Response response = response("GET", endpoint, status);
when(client.performRequest(eq("PUT"), eq(endpoint), eq(resource.getParameters()), any(bodyType))).thenReturn(response);
assertThat(resource.doPublish(client), is(expected));
}
protected RestStatus successfulCheckStatus() {
return RestStatus.OK;
}
protected RestStatus notFoundCheckStatus() {
return RestStatus.NOT_FOUND;
}
protected RestStatus failedCheckStatus() {
final Predicate<RestStatus> ignoreStatus = (final RestStatus status) -> status == RestStatus.OK || status == RestStatus.NOT_FOUND;
return randomValueOtherThanMany(ignoreStatus, () -> randomFrom(RestStatus.values()));
}
protected RestStatus successfulPublishStatus() {
return randomFrom(RestStatus.OK, RestStatus.CREATED);
}
protected RestStatus failedPublishStatus() {
final Predicate<RestStatus> ignoreStatus = (final RestStatus status) -> status == RestStatus.OK || status == RestStatus.CREATED;
return randomValueOtherThanMany(ignoreStatus, () -> randomFrom(RestStatus.values()));
}
protected String concatenateEndpoint(final String resourceBasePath, final String resourceName) {
return resourceBasePath + "/" + resourceName;
}
protected Response response(final String method, final String endpoint, final RestStatus status) {
final Response response = mock(Response.class);
// fill out the response enough so that the exception can be constructed
final RequestLine requestLine = mock(RequestLine.class);
when(requestLine.getMethod()).thenReturn(method);
when(requestLine.getUri()).thenReturn(endpoint);
final StatusLine statusLine = mock(StatusLine.class);
when(statusLine.getStatusCode()).thenReturn(status.getStatus());
when(response.getRequestLine()).thenReturn(requestLine);
when(response.getStatusLine()).thenReturn(statusLine);
return response;
}
protected ResponseException responseException(final String method, final String endpoint, final RestStatus status) {
try {
return new ResponseException(response(method, endpoint, status));
} catch (final IOException e) {
throw new IllegalStateException("update responseException to properly build the ResponseException", e);
}
}
}
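
(Not part of this change: a hypothetical subclass showing how the helper methods above are meant to be combined; the resource, name, and template body are placeholders.)

package org.elasticsearch.xpack.monitoring.exporter.http;

public class ExampleTemplateHttpResourceTests extends AbstractPublishableHttpResourceTestCase {

    private final String templateName = ".monitoring-example";
    // trivial template body; real tests would use the template supplied by the exporter
    private final TemplateHttpResource resource =
            new TemplateHttpResource(owner, masterTimeout, templateName, () -> "{}");

    public void testDoCheck() throws Exception {
        assertCheckExists(resource, "/_template", templateName);
    }

    public void testDoCheckWithException() throws Exception {
        assertCheckWithException(resource, "/_template", templateName);
    }

    public void testParameters() {
        assertParameters(resource);
    }
}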

View File

@ -0,0 +1,195 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicInteger;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Tests {@link HttpExportBulkResponseListener}.
*/
public class HttpExportBulkResponseListenerTests extends ESTestCase {
public void testOnSuccess() throws IOException {
final Response response = mock(Response.class);
final StringEntity entity = new StringEntity("{\"took\":5,\"errors\":false}", ContentType.APPLICATION_JSON);
when(response.getEntity()).thenReturn(entity);
// doesn't explode
new WarningsHttpExporterBulkResponseListener().onSuccess(response);
}
public void testOnSuccessParsing() throws IOException {
// {"took": 4, "errors": false, ...
final Response response = mock(Response.class);
final XContent xContent = mock(XContent.class);
final XContentParser parser = mock(XContentParser.class);
final HttpEntity entity = mock(HttpEntity.class);
final InputStream stream = mock(InputStream.class);
when(response.getEntity()).thenReturn(entity);
when(entity.getContent()).thenReturn(stream);
when(xContent.createParser(stream)).thenReturn(parser);
// {, "took", 4, "errors", false
when(parser.nextToken()).thenReturn(Token.START_OBJECT,
Token.FIELD_NAME, Token.VALUE_NUMBER,
Token.FIELD_NAME, Token.VALUE_BOOLEAN);
when(parser.currentName()).thenReturn("took", "errors");
when(parser.booleanValue()).thenReturn(false);
new HttpExportBulkResponseListener(xContent).onSuccess(response);
verify(parser, times(5)).nextToken();
verify(parser, times(2)).currentName();
verify(parser).booleanValue();
}
public void testOnSuccessWithInnerErrors() {
final String[] expectedErrors = new String[] { randomAsciiOfLengthBetween(4, 10), randomAsciiOfLengthBetween(5, 9) };
final AtomicInteger counter = new AtomicInteger(0);
final Response response = mock(Response.class);
final StringEntity entity = new StringEntity(
"{\"took\":4,\"errors\":true,\"items\":[" +
"{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"123\"}}," +
"{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"456\"," +
"\"error\":\"" + expectedErrors[0] + "\"}}," +
"{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"789\"}}," +
"{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"012\"," +
"\"error\":\"" + expectedErrors[1] + "\"}}" +
"]}",
ContentType.APPLICATION_JSON);
when(response.getEntity()).thenReturn(entity);
// doesn't explode
new WarningsHttpExporterBulkResponseListener() {
@Override
void onItemError(final String text) {
assertEquals(expectedErrors[counter.getAndIncrement()], text);
}
}.onSuccess(response);
assertEquals(expectedErrors.length, counter.get());
}
public void testOnSuccessParsingWithInnerErrors() throws IOException {
// {"took": 4, "errors": true, "items": [ { "index": { "_index": "ignored", "_type": "ignored", "_id": "ignored" },
// { "index": { "_index": "ignored", "_type": "ignored", "_id": "ignored", "error": "blah" }
// ]...
final Response response = mock(Response.class);
final XContent xContent = mock(XContent.class);
final XContentParser parser = mock(XContentParser.class);
final HttpEntity entity = mock(HttpEntity.class);
final InputStream stream = mock(InputStream.class);
when(response.getEntity()).thenReturn(entity);
when(entity.getContent()).thenReturn(stream);
when(xContent.createParser(stream)).thenReturn(parser);
// {, "took", 4, "errors", false nextToken, currentName
when(parser.nextToken()).thenReturn(Token.START_OBJECT, // 1
Token.FIELD_NAME, Token.VALUE_NUMBER, // 3, 1
Token.FIELD_NAME, Token.VALUE_BOOLEAN, // 5, 2
Token.FIELD_NAME, Token.START_ARRAY, // 7, 3
// no error:
Token.START_OBJECT, // 8
Token.FIELD_NAME, Token.START_OBJECT, // 10, 4
Token.FIELD_NAME, Token.VALUE_STRING, // 12, 5
Token.FIELD_NAME, Token.VALUE_STRING, // 14, 6
Token.FIELD_NAME, Token.VALUE_STRING, // 16, 7
Token.END_OBJECT, // 17
Token.START_OBJECT, // 18
Token.FIELD_NAME, Token.START_OBJECT, // 20, 8
Token.FIELD_NAME, Token.VALUE_STRING, // 22, 9
Token.FIELD_NAME, Token.VALUE_STRING, // 24, 10
Token.FIELD_NAME, Token.VALUE_STRING, // 26, 11
Token.FIELD_NAME, Token.VALUE_STRING, // 28, 12 ("error")
Token.END_OBJECT, // 29
Token.END_ARRAY); // 30
when(parser.currentName()).thenReturn("took", "errors", "items",
"index", "_index", "_type", "_id",
"index", "_index", "_type", "_id", "error");
// there were errors; so go diving for the error
when(parser.booleanValue()).thenReturn(true);
when(parser.text()).thenReturn("this is the error");
new HttpExportBulkResponseListener(xContent).onSuccess(response);
verify(parser, times(30)).nextToken();
verify(parser, times(12)).currentName();
verify(parser).booleanValue();
verify(parser).text();
}
public void testOnSuccessMalformed() {
final AtomicInteger counter = new AtomicInteger(0);
final Response response = mock(Response.class);
if (randomBoolean()) {
// malformed JSON
when(response.getEntity()).thenReturn(new StringEntity("{", ContentType.APPLICATION_JSON));
}
new WarningsHttpExporterBulkResponseListener() {
@Override
void onError(final String msg, final Throwable cause) {
counter.getAndIncrement();
}
}.onSuccess(response);
assertEquals(1, counter.get());
}
public void testOnFailure() {
final Exception exception = randomBoolean() ? new Exception() : new RuntimeException();
new WarningsHttpExporterBulkResponseListener() {
@Override
void onError(final String msg, final Throwable cause) {
assertSame(exception, cause);
}
}.onFailure(exception);
}
private static class WarningsHttpExporterBulkResponseListener extends HttpExportBulkResponseListener {
WarningsHttpExporterBulkResponseListener() {
super(XContentType.JSON.xContent());
}
@Override
void onItemError(final String msg) {
fail("There should be no errors within the response!");
}
@Override
void onError(final String msg, final Throwable cause) {
super.onError(msg, cause); // let it log the exception so you can check the output
fail("There should be no errors!");
}
}
}

View File

@ -0,0 +1,590 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import okio.Buffer;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.xpack.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStateMonitoringDoc;
import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryMonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.monitoring.resolver.bulk.MonitoringBulkTimestampedResolver;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.joda.time.format.DateTimeFormat;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.FILTER_PATH_NONE;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
public class HttpExporterIT extends MonitoringIntegTestCase {
private MockWebServerContainer webServerContainer;
private MockWebServer webServer;
@Before
public void startWebServer() {
webServerContainer = new MockWebServerContainer();
webServer = webServerContainer.getWebServer();
}
@After
public void stopWebServer() throws Exception {
webServer.shutdown();
}
@Override
protected boolean ignoreExternalCluster() {
return true;
}
public void testExport() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
final Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());
internalCluster().startNode(builder);
final int nbDocs = randomIntBetween(1, 25);
export(newRandomMonitoringDocs(nbDocs));
assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
assertBulk(webServer, nbDocs);
}
public void testExportWithHeaders() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final String headerValue = randomAsciiOfLengthBetween(3, 9);
final String[] array = generateRandomStringArray(2, 4, false);
final Map<String, String[]> headers = new HashMap<>();
headers.put("X-Cloud-Cluster", new String[] { headerValue });
headers.put("X-Found-Cluster", new String[] { headerValue });
headers.put("Array-Check", array);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress())
.put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
.put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
.putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);
internalCluster().startNode(builder);
final int nbDocs = randomIntBetween(1, 25);
export(newRandomMonitoringDocs(nbDocs));
assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready, headers, null);
assertBulk(webServer, nbDocs, headers, null);
}
public void testExportWithBasePath() throws Exception {
final boolean useHeaders = randomBoolean();
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final String headerValue = randomAsciiOfLengthBetween(3, 9);
final String[] array = generateRandomStringArray(2, 4, false);
final Map<String, String[]> headers = new HashMap<>();
if (useHeaders) {
headers.put("X-Cloud-Cluster", new String[] { headerValue });
headers.put("X-Found-Cluster", new String[] { headerValue });
headers.put("Array-Check", array);
}
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false}");
String basePath = "path/to";
if (randomBoolean()) {
basePath += "/something";
if (rarely()) {
basePath += "/proxied";
}
}
if (randomBoolean()) {
basePath = "/" + basePath;
}
final Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress())
.put("xpack.monitoring.exporters._http.proxy.base_path", basePath + (randomBoolean() ? "/" : ""));
if (useHeaders) {
builder
.put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
.put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
.putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);
}
internalCluster().startNode(builder);
final int nbDocs = randomIntBetween(1, 25);
export(newRandomMonitoringDocs(nbDocs));
assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready, headers, basePath);
assertBulk(webServer, nbDocs, headers, basePath);
}
public void testHostChangeReChecksTemplate() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false}");
internalCluster().startNode(builder);
export(Collections.singletonList(newRandomMonitoringDoc()));
assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
assertBulk(webServer);
try (final MockWebServerContainer secondWebServerContainer = new MockWebServerContainer(webServerContainer.getPort() + 1)) {
final MockWebServer secondWebServer = secondWebServerContainer.getWebServer();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
Settings.builder().putArray("xpack.monitoring.exporters._http.host", secondWebServerContainer.getFormattedAddress())));
enqueueGetClusterVersionResponse(secondWebServer, Version.CURRENT);
// pretend that one of the templates is missing
for (Tuple<String, String> template : monitoringTemplates()) {
if (template.v1().contains(MonitoringBulkTimestampedResolver.Data.DATA)) {
enqueueResponse(secondWebServer, 200, "template [" + template + "] exists");
} else {
enqueueResponse(secondWebServer, 404, "template [" + template + "] does not exist");
enqueueResponse(secondWebServer, 201, "template [" + template + "] created");
}
}
// opposite of if it existed before
enqueuePipelineResponses(secondWebServer, !pipelineExistsAlready);
enqueueResponse(secondWebServer, 200, "{\"errors\": false}");
logger.info("--> exporting a second event");
export(Collections.singletonList(newRandomMonitoringDoc()));
assertMonitorVersion(secondWebServer);
for (Tuple<String, String> template : monitoringTemplates()) {
RecordedRequest recordedRequest = secondWebServer.takeRequest();
assertThat(recordedRequest.getMethod(), equalTo("GET"));
assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.v1() + resourceQueryString()));
if (template.v1().contains(MonitoringBulkTimestampedResolver.Data.DATA) == false) {
recordedRequest = secondWebServer.takeRequest();
assertThat(recordedRequest.getMethod(), equalTo("PUT"));
assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.v1() + resourceQueryString()));
assertThat(recordedRequest.getBody().readUtf8(), equalTo(template.v2()));
}
}
assertMonitorPipelines(secondWebServer, !pipelineExistsAlready, null, null);
assertBulk(secondWebServer);
}
}
public void testUnsupportedClusterVersion() throws Exception {
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());
// returning an unsupported cluster version
enqueueGetClusterVersionResponse(randomFrom(Version.fromString("0.18.0"), Version.fromString("1.0.0"),
Version.fromString("1.4.0"), Version.fromString("2.4.0")));
String agentNode = internalCluster().startNode(builder);
// fire off what should be an unsuccessful request
assertNull(getExporter(agentNode).openBulk());
assertThat(webServer.getRequestCount(), equalTo(1));
assertMonitorVersion(webServer);
}
public void testDynamicIndexFormatChange() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());
internalCluster().startNode(builder);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
MonitoringDoc doc = newRandomMonitoringDoc();
export(Collections.singletonList(doc));
assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
RecordedRequest recordedRequest = assertBulk(webServer);
@SuppressWarnings("unchecked")
String indexName = new ResolversRegistry(Settings.EMPTY).getResolver(doc).index(doc);
byte[] bytes = recordedRequest.getBody().readByteArray();
Map<String, Object> data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
@SuppressWarnings("unchecked")
Map<String, Object> index = (Map<String, Object>) data.get("index");
assertThat(index.get("_index"), equalTo(indexName));
String newTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM");
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.put("xpack.monitoring.exporters._http.index.name.time_format", newTimeFormat)));
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, true, true);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
doc = newRandomMonitoringDoc();
export(Collections.singletonList(doc));
String expectedMonitoringIndex = ".monitoring-es-" + MonitoringTemplateUtils.TEMPLATE_VERSION + "-"
+ DateTimeFormat.forPattern(newTimeFormat).withZoneUTC().print(doc.getTimestamp());
assertMonitorResources(webServer, true, true);
recordedRequest = assertBulk(webServer);
bytes = recordedRequest.getBody().readByteArray();
data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
@SuppressWarnings("unchecked")
final Map<String, Object> newIndex = (Map<String, Object>) data.get("index");
assertThat(newIndex.get("_index"), equalTo(expectedMonitoringIndex));
}
private void assertMonitorVersion(final MockWebServer webServer) throws Exception {
assertMonitorVersion(webServer, null, null);
}
private void assertMonitorVersion(final MockWebServer webServer,
@Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
throws Exception {
final String pathPrefix = basePathToAssertablePrefix(basePath);
final RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo(pathPrefix + "/?filter_path=version.number"));
assertHeaders(request, customHeaders);
}
private void assertMonitorResources(final MockWebServer webServer,
final boolean templateAlreadyExists, final boolean pipelineAlreadyExists)
throws Exception {
assertMonitorResources(webServer, templateAlreadyExists, pipelineAlreadyExists, null, null);
}
private void assertMonitorResources(final MockWebServer webServer,
final boolean templateAlreadyExists, final boolean pipelineAlreadyExists,
@Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
throws Exception {
assertMonitorVersion(webServer, customHeaders, basePath);
assertMonitorTemplates(webServer, templateAlreadyExists, customHeaders, basePath);
assertMonitorPipelines(webServer, pipelineAlreadyExists, customHeaders, basePath);
}
private void assertMonitorTemplates(final MockWebServer webServer, final boolean alreadyExists,
@Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
throws Exception {
final String pathPrefix = basePathToAssertablePrefix(basePath);
RecordedRequest request;
for (Tuple<String, String> template : monitoringTemplates()) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo(pathPrefix + "/_template/" + template.v1() + resourceQueryString()));
assertHeaders(request, customHeaders);
if (alreadyExists == false) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("PUT"));
assertThat(request.getPath(), equalTo(pathPrefix + "/_template/" + template.v1() + resourceQueryString()));
assertThat(request.getBody().readUtf8(), equalTo(template.v2()));
assertHeaders(request, customHeaders);
}
}
}
private void assertMonitorPipelines(final MockWebServer webServer, final boolean alreadyExists,
@Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
throws Exception {
final String pathPrefix = basePathToAssertablePrefix(basePath);
RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo(pathPrefix + "/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME + resourceQueryString()));
assertHeaders(request, customHeaders);
if (alreadyExists == false) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("PUT"));
assertThat(request.getPath(),
equalTo(pathPrefix + "/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME + resourceQueryString()));
assertThat(request.getBody().readUtf8(), equalTo(Exporter.emptyPipeline(XContentType.JSON).string()));
assertHeaders(request, customHeaders);
}
}
private RecordedRequest assertBulk(final MockWebServer webServer) throws Exception {
return assertBulk(webServer, -1);
}
private RecordedRequest assertBulk(final MockWebServer webServer, final int docs) throws Exception {
return assertBulk(webServer, docs, null, null);
}
private RecordedRequest assertBulk(final MockWebServer webServer, final int docs,
@Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
throws Exception {
final String pathPrefix = basePathToAssertablePrefix(basePath);
final RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("POST"));
assertThat(request.getPath(), equalTo(pathPrefix + "/_bulk" + bulkQueryString()));
assertHeaders(request, customHeaders);
if (docs != -1) {
assertBulkRequest(request.getBody(), docs);
}
return request;
}
private void assertHeaders(final RecordedRequest request, final Map<String, String[]> customHeaders) {
if (customHeaders != null) {
for (final Map.Entry<String, String[]> entry : customHeaders.entrySet()) {
final String header = entry.getKey();
final String[] values = entry.getValue();
final List<String> headerValues = request.getHeaders().values(header);
assertThat(header, headerValues, hasSize(values.length));
assertThat(header, headerValues, containsInAnyOrder(values));
}
}
}
private void export(Collection<MonitoringDoc> docs) throws Exception {
Exporters exporters = internalCluster().getInstance(Exporters.class);
assertThat(exporters, notNullValue());
// Wait for exporting bulks to be ready to export
assertBusy(() -> exporters.forEach(exporter -> assertThat(exporter.openBulk(), notNullValue())));
exporters.export(docs);
}
private HttpExporter getExporter(String nodeName) {
Exporters exporters = internalCluster().getInstance(Exporters.class, nodeName);
return (HttpExporter) exporters.iterator().next();
}
private MonitoringDoc newRandomMonitoringDoc() {
if (randomBoolean()) {
IndexRecoveryMonitoringDoc doc = new IndexRecoveryMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
doc.setClusterUUID(internalCluster().getClusterName());
doc.setTimestamp(System.currentTimeMillis());
doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
doc.setRecoveryResponse(new RecoveryResponse());
return doc;
} else {
ClusterStateMonitoringDoc doc = new ClusterStateMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
doc.setClusterUUID(internalCluster().getClusterName());
doc.setTimestamp(System.currentTimeMillis());
doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
doc.setClusterState(ClusterState.PROTO);
doc.setStatus(ClusterHealthStatus.GREEN);
return doc;
}
}
private List<MonitoringDoc> newRandomMonitoringDocs(int nb) {
List<MonitoringDoc> docs = new ArrayList<>(nb);
for (int i = 0; i < nb; i++) {
docs.add(newRandomMonitoringDoc());
}
return docs;
}
private String basePathToAssertablePrefix(@Nullable final String basePath) {
if (basePath == null) {
return "";
}
return basePath.startsWith("/") == false ? "/" + basePath : basePath;
}
private String resourceQueryString() {
return "?filter_path=" + urlEncode(FILTER_PATH_NONE);
}
private String bulkQueryString() {
return "?pipeline=" + urlEncode(Exporter.EXPORT_PIPELINE_NAME) + "&filter_path=" + urlEncode("errors,items.*.error");
}
private String urlEncode(final String value) {
try {
return URLEncoder.encode(value, "UTF-8");
} catch (UnsupportedEncodingException e) {
// whelp, our JVM is broken
throw new RuntimeException(e);
}
}
private void enqueueGetClusterVersionResponse(Version v) throws IOException {
enqueueGetClusterVersionResponse(webServer, v);
}
private void enqueueGetClusterVersionResponse(MockWebServer mockWebServer, Version v) throws IOException {
mockWebServer.enqueue(new MockResponse().setResponseCode(200).setBody(
jsonBuilder()
.startObject().startObject("version").field("number", v.toString()).endObject().endObject().bytes()
.utf8ToString()));
}
private void enqueueTemplateAndPipelineResponses(final MockWebServer webServer,
final boolean templatesAlreadyExists, final boolean pipelineAlreadyExists)
throws IOException {
enqueueTemplateResponses(webServer, templatesAlreadyExists);
enqueuePipelineResponses(webServer, pipelineAlreadyExists);
}
private void enqueueTemplateResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
if (alreadyExists) {
enqueueTemplateResponsesExistsAlready(webServer);
} else {
enqueueTemplateResponsesDoesNotExistYet(webServer);
}
}
private void enqueueTemplateResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
for (String template : monitoringTemplateNames()) {
enqueueResponse(webServer, 404, "template [" + template + "] does not exist");
enqueueResponse(webServer, 201, "template [" + template + "] created");
}
}
private void enqueueTemplateResponsesExistsAlready(final MockWebServer webServer) throws IOException {
for (String template : monitoringTemplateNames()) {
enqueueResponse(webServer, 200, "template [" + template + "] exists");
}
}
private void enqueuePipelineResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
if (alreadyExists) {
enqueuePipelineResponsesExistsAlready(webServer);
} else {
enqueuePipelineResponsesDoesNotExistYet(webServer);
}
}
private void enqueuePipelineResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
enqueueResponse(webServer, 404, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] does not exist");
enqueueResponse(webServer, 201, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] created");
}
private void enqueuePipelineResponsesExistsAlready(final MockWebServer webServer) throws IOException {
enqueueResponse(webServer, 200, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] exists");
}
private void enqueueResponse(int responseCode, String body) throws IOException {
enqueueResponse(webServer, responseCode, body);
}
private void enqueueResponse(MockWebServer mockWebServer, int responseCode, String body) throws IOException {
mockWebServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body));
}
private void assertBulkRequest(Buffer requestBody, int numberOfActions) throws Exception {
BulkRequest bulkRequest = Requests.bulkRequest().add(new BytesArray(requestBody.readByteArray()), null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(numberOfActions));
for (ActionRequest actionRequest : bulkRequest.requests()) {
assertThat(actionRequest, instanceOf(IndexRequest.class));
}
}
}

View File

@ -0,0 +1,382 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.startsWith;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
/**
* Tests {@link HttpExporter} explicitly for its resource handling.
*/
public class HttpExporterResourceTests extends AbstractPublishableHttpResourceTestCase {
private final int EXPECTED_TEMPLATES = 3;
private final RestClient client = mock(RestClient.class);
private final Response versionResponse = mock(Response.class);
private final MultiHttpResource resources =
HttpExporter.createResources(new Exporter.Config("_http", "http", Settings.EMPTY), new ResolversRegistry(Settings.EMPTY));
public void testInvalidVersionBlocks() throws IOException {
final HttpEntity entity = new StringEntity("{\"version\":{\"number\":\"unknown\"}}", ContentType.APPLICATION_JSON);
when(versionResponse.getEntity()).thenReturn(entity);
when(client.performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class))).thenReturn(versionResponse);
assertTrue(resources.isDirty());
assertFalse(resources.checkAndPublish(client));
// ensure it didn't magically become clean
assertTrue(resources.isDirty());
verifyVersionCheck();
verifyNoMoreInteractions(client);
}
public void testTemplateCheckBlocksAfterSuccessfulVersion() throws IOException {
final Exception exception = failureGetException();
final boolean firstSucceeds = randomBoolean();
int expectedGets = 1;
int expectedPuts = 0;
whenValidVersionResponse();
// failure in the middle of various templates being checked/published; suggests a node dropped
if (firstSucceeds) {
final boolean successfulFirst = randomBoolean();
// -2 from one success + a necessary failure after it!
final int extraPasses = randomIntBetween(0, EXPECTED_TEMPLATES - 2);
final int successful = randomIntBetween(0, extraPasses);
final int unsuccessful = extraPasses - successful;
final Response first = successfulFirst ? successfulGetResponse() : unsuccessfulGetResponse();
final List<Response> otherResponses = getResponses(successful, unsuccessful);
// last check fails implies that N - 2 publishes succeeded!
when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
.thenReturn(first, otherResponses.toArray(new Response[otherResponses.size()]))
.thenThrow(exception);
whenSuccessfulPutTemplates(otherResponses.size() + 1);
expectedGets += 1 + successful + unsuccessful;
expectedPuts = (successfulFirst ? 0 : 1) + unsuccessful;
} else {
when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
.thenThrow(exception);
}
assertTrue(resources.isDirty());
assertFalse(resources.checkAndPublish(client));
// ensure it didn't magically become clean
assertTrue(resources.isDirty());
verifyVersionCheck();
verifyGetTemplates(expectedGets);
verifyPutTemplates(expectedPuts);
verifyNoMoreInteractions(client);
}
public void testTemplatePublishBlocksAfterSuccessfulVersion() throws IOException {
final Exception exception = failurePutException();
final boolean firstSucceeds = randomBoolean();
int expectedGets = 1;
int expectedPuts = 1;
whenValidVersionResponse();
// failure in the middle of various templates being checked/published; suggests a node dropped
if (firstSucceeds) {
final Response firstSuccess = successfulPutResponse();
// -2 from one success + a necessary failure after it!
final int extraPasses = randomIntBetween(0, EXPECTED_TEMPLATES - 2);
final int successful = randomIntBetween(0, extraPasses);
final int unsuccessful = extraPasses - successful;
final List<Response> otherResponses = successfulPutResponses(unsuccessful);
// first one passes for sure, so we need an extra "unsuccessful" GET
whenGetTemplates(successful, unsuccessful + 2);
// previous publishes must have succeeded
when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
.thenReturn(firstSuccess, otherResponses.toArray(new Response[otherResponses.size()]))
.thenThrow(exception);
// GETs required for each PUT attempt (first is guaranteed "unsuccessful")
expectedGets += successful + unsuccessful + 1;
// unsuccessful are PUT attempts + the guaranteed successful PUT (first)
expectedPuts += unsuccessful + 1;
} else {
// fail the check so that it has to attempt the PUT
whenGetTemplates(0, 1);
when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
.thenThrow(exception);
}
assertTrue(resources.isDirty());
assertFalse(resources.checkAndPublish(client));
// ensure it didn't magically become clean
assertTrue(resources.isDirty());
verifyVersionCheck();
verifyGetTemplates(expectedGets);
verifyPutTemplates(expectedPuts);
verifyNoMoreInteractions(client);
}
public void testPipelineCheckBlocksAfterSuccessfulTemplates() throws IOException {
final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
final Exception exception = failureGetException();
whenValidVersionResponse();
whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
whenSuccessfulPutTemplates(EXPECTED_TEMPLATES);
// we only expect a single pipeline for now
when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
.thenThrow(exception);
assertTrue(resources.isDirty());
assertFalse(resources.checkAndPublish(client));
// ensure it didn't magically become clean
assertTrue(resources.isDirty());
verifyVersionCheck();
verifyGetTemplates(EXPECTED_TEMPLATES);
verifyPutTemplates(unsuccessfulGetTemplates);
verifyGetPipelines(1);
verifyPutPipelines(0);
verifyNoMoreInteractions(client);
}
public void testPipelinePublishBlocksAfterSuccessfulTemplates() throws IOException {
final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
final Exception exception = failurePutException();
whenValidVersionResponse();
whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
whenSuccessfulPutTemplates(EXPECTED_TEMPLATES);
// pipeline can't be there
whenGetPipelines(0, 1);
// we only expect a single pipeline for now
when(client.performRequest(eq("PUT"),
startsWith("/_ingest/pipeline/"),
anyMapOf(String.class, String.class),
any(HttpEntity.class)))
.thenThrow(exception);
assertTrue(resources.isDirty());
assertFalse(resources.checkAndPublish(client));
// ensure it didn't magically become clean
assertTrue(resources.isDirty());
verifyVersionCheck();
verifyGetTemplates(EXPECTED_TEMPLATES);
verifyPutTemplates(unsuccessfulGetTemplates);
verifyGetPipelines(1);
verifyPutPipelines(1);
verifyNoMoreInteractions(client);
}
public void testSuccessfulChecks() throws IOException {
final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
final int successfulGetPipelines = randomIntBetween(0, 1);
final int unsuccessfulGetPipelines = 1 - successfulGetPipelines;
whenValidVersionResponse();
whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
whenSuccessfulPutTemplates(unsuccessfulGetTemplates);
whenGetPipelines(successfulGetPipelines, unsuccessfulGetPipelines);
whenSuccessfulPutPipelines(1);
assertTrue(resources.isDirty());
// it should be able to proceed!
assertTrue(resources.checkAndPublish(client));
assertFalse(resources.isDirty());
verifyVersionCheck();
verifyGetTemplates(EXPECTED_TEMPLATES);
verifyPutTemplates(unsuccessfulGetTemplates);
verifyGetPipelines(1);
verifyPutPipelines(unsuccessfulGetPipelines);
verifyNoMoreInteractions(client);
}
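// the helpers below simulate failures as connection-level IOExceptions, unexpected RuntimeExceptions, or
// HTTP error responses (ResponseException); the tests pick one at random since all three must block the
// resource chain the same way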
private Exception failureGetException() {
final ResponseException responseException = responseException("GET", "/_get_something", failedCheckStatus());
return randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
}
private Exception failurePutException() {
final ResponseException responseException = responseException("PUT", "/_put_something", failedPublishStatus());
return randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
}
private Response successfulGetResponse() {
return response("GET", "/_get_something", successfulCheckStatus());
}
private Response unsuccessfulGetResponse() {
return response("GET", "/_get_something", notFoundCheckStatus());
}
private List<Response> getResponses(final int successful, final int unsuccessful) {
final List<Response> responses = new ArrayList<>(successful);
for (int i = 0; i < successful; ++i) {
responses.add(successfulGetResponse());
}
for (int i = 0; i < unsuccessful; ++i) {
responses.add(unsuccessfulGetResponse());
}
return responses;
}
private Response successfulPutResponse() {
final Response response = mock(Response.class);
final StatusLine statusLine = mock(StatusLine.class);
when(response.getStatusLine()).thenReturn(statusLine);
when(statusLine.getStatusCode()).thenReturn(randomFrom(RestStatus.OK, RestStatus.CREATED).getStatus());
return response;
}
private List<Response> successfulPutResponses(final int successful) {
final List<Response> responses = new ArrayList<>(successful);
for (int i = 0; i < successful; ++i) {
responses.add(successfulPutResponse());
}
return responses;
}
private void whenValidVersionResponse() throws IOException {
final HttpEntity entity = new StringEntity("{\"version\":{\"number\":\"" + Version.CURRENT + "\"}}", ContentType.APPLICATION_JSON);
when(versionResponse.getEntity()).thenReturn(entity);
when(client.performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class))).thenReturn(versionResponse);
}
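// the when* helpers stub RestClient#performRequest using Mockito's chained stubbing, e.g.
// when(client.performRequest(...)).thenReturn(first, rest...).thenThrow(exception),
// so consecutive calls walk through the successful, not-found, and failing responses in order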
private void whenGetTemplates(final int successful, final int unsuccessful) throws IOException {
final List<Response> gets = getResponses(successful, unsuccessful);
if (gets.size() == 1) {
when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
.thenReturn(gets.get(0));
} else {
when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
.thenReturn(gets.get(0), gets.subList(1, gets.size()).toArray(new Response[gets.size() - 1]));
}
}
private void whenSuccessfulPutTemplates(final int successful) throws IOException {
final List<Response> successfulPuts = successfulPutResponses(successful);
// empty is possible if they all exist
if (successful == 1) {
when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
.thenReturn(successfulPuts.get(0));
} else if (successful > 1) {
when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
.thenReturn(successfulPuts.get(0), successfulPuts.subList(1, successful).toArray(new Response[successful - 1]));
}
}
private void whenGetPipelines(final int successful, final int unsuccessful) throws IOException {
final List<Response> gets = getResponses(successful, unsuccessful);
if (gets.size() == 1) {
when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
.thenReturn(gets.get(0));
} else {
when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
.thenReturn(gets.get(0), gets.subList(1, gets.size()).toArray(new Response[gets.size() - 1]));
}
}
private void whenSuccessfulPutPipelines(final int successful) throws IOException {
final List<Response> successfulPuts = successfulPutResponses(successful);
// empty is possible if they all exist
if (successful == 1) {
when(client.performRequest(eq("PUT"),
startsWith("/_ingest/pipeline/"),
anyMapOf(String.class, String.class),
any(HttpEntity.class)))
.thenReturn(successfulPuts.get(0));
} else if (successful > 1) {
when(client.performRequest(eq("PUT"),
startsWith("/_ingest/pipeline/"),
anyMapOf(String.class, String.class),
any(HttpEntity.class)))
.thenReturn(successfulPuts.get(0), successfulPuts.subList(1, successful).toArray(new Response[successful - 1]));
}
}
private void verifyVersionCheck() throws IOException {
verify(client).performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class));
}
private void verifyGetTemplates(final int called) throws IOException {
verify(client, times(called)).performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class));
}
private void verifyPutTemplates(final int called) throws IOException {
verify(client, times(called)).performRequest(eq("PUT"), // method
startsWith("/_template/"), // endpoint
anyMapOf(String.class, String.class), // parameters (e.g., timeout)
any(HttpEntity.class)); // raw template
}
private void verifyGetPipelines(final int called) throws IOException {
verify(client, times(called)).performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class));
}
private void verifyPutPipelines(final int called) throws IOException {
verify(client, times(called)).performRequest(eq("PUT"), // method
startsWith("/_ingest/pipeline/"), // endpoint
anyMapOf(String.class, String.class), // parameters (e.g., timeout)
any(HttpEntity.class)); // raw pipeline
}
}

View File

@ -1,137 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.ssl.SSLService;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
/**
* Tests for {@link HttpExporter}.
*/
public class HttpExporterSimpleTests extends ESTestCase {
private final Environment environment = mock(Environment.class);
public void testExporterWithBlacklistedHeaders() {
final String blacklistedHeader = randomFrom(HttpExporter.BLACKLISTED_HEADERS);
final String expected = "[" + blacklistedHeader + "] cannot be overwritten via [xpack.monitoring.exporters._http.headers]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200")
.put("xpack.monitoring.exporters._http.headers.abc", "xyz")
.put("xpack.monitoring.exporters._http.headers." + blacklistedHeader, "value should not matter");
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
}
final Exporter.Config config = createConfig("_http", builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> {
new HttpExporter(config, environment, new SSLService(builder.build(), environment));
});
assertThat(exception.getMessage(), equalTo(expected));
}
public void testExporterWithEmptyHeaders() {
final String name = randomFrom("abc", "ABC", "X-Flag");
final String expected = "headers must have values, missing for setting [xpack.monitoring.exporters._http.headers." + name + "]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "localhost:9200")
.put("xpack.monitoring.exporters._http.headers." + name, "");
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
}
final Exporter.Config config = createConfig("_http", builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> {
new HttpExporter(config, environment, new SSLService(builder.build(), environment));
});
assertThat(exception.getMessage(), equalTo(expected));
}
public void testExporterWithMissingHost() {
// forgot host!
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.host", "");
} else if (randomBoolean()) {
builder.putArray("xpack.monitoring.exporters._http.host");
} else if (randomBoolean()) {
builder.putNull("xpack.monitoring.exporters._http.host");
}
final Exporter.Config config = createConfig("_http", builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> {
new HttpExporter(config, environment, new SSLService(builder.build(), environment));
});
assertThat(exception.getMessage(), equalTo("missing required setting [xpack.monitoring.exporters._http.host]"));
}
public void testExporterWithInvalidHost() {
final String invalidHost = randomFrom("://localhost:9200", "gopher!://xyz.my.com");
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);
// sometimes add a valid URL with it
if (randomBoolean()) {
if (randomBoolean()) {
builder.putArray("xpack.monitoring.exporters._http.host", "localhost:9200", invalidHost);
} else {
builder.putArray("xpack.monitoring.exporters._http.host", invalidHost, "localhost:9200");
}
} else {
builder.put("xpack.monitoring.exporters._http.host", invalidHost);
}
final Exporter.Config config = createConfig("_http", builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> {
new HttpExporter(config, environment, new SSLService(builder.build(), environment));
});
assertThat(exception.getMessage(), equalTo("[xpack.monitoring.exporters._http.host] invalid host: [" + invalidHost + "]"));
}
public void testExporterWithHostOnly() {
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200");
final Exporter.Config config = createConfig("_http", builder.build());
new HttpExporter(config, environment, new SSLService(builder.build(), environment));
}
/**
* Create the {@link Exporter.Config} with the given name, and select those settings from {@code settings}.
*
* @param name The name of the exporter.
* @param settings The settings to select the exporter's settings from
* @return Never {@code null}.
*/
private static Exporter.Config createConfig(String name, Settings settings) {
return new Exporter.Config(name, HttpExporter.TYPE, Settings.EMPTY, settings.getAsSettings("xpack.monitoring.exporters." + name));
}
}

View File

@ -1,211 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import com.squareup.okhttp.mockwebserver.Dispatcher;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.exporter.AbstractExporterTemplateTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.junit.After;
import org.junit.Before;
import java.net.BindException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.core.Is.is;
public class HttpExporterTemplateTests extends AbstractExporterTemplateTestCase {
private MockWebServer webServer;
private MockServerDispatcher dispatcher;
@Before
public void startWebServer() throws Exception {
for (int webPort = 9250; webPort < 9300; webPort++) {
try {
webServer = new MockWebServer();
dispatcher = new MockServerDispatcher();
webServer.setDispatcher(dispatcher);
webServer.start(webPort);
return;
} catch (BindException be) {
logger.warn("port [{}] was already in use trying next port", webPort);
}
}
throw new ElasticsearchException("unable to find open port between 9200 and 9300");
}
@After
public void stopWebServer() throws Exception {
webServer.shutdown();
}
@Override
protected Settings exporterSettings() {
return Settings.builder()
.put("type", "http")
.put("host", webServer.getHostName() + ":" + webServer.getPort())
.put("connection.keep_alive", false)
.put(Exporter.INDEX_NAME_TIME_FORMAT_SETTING, "YYYY")
.build();
}
@Override
protected void deleteTemplates() throws Exception {
dispatcher.templates.clear();
}
@Override
protected void deletePipeline() throws Exception {
dispatcher.pipelines.clear();
}
@Override
protected void putTemplate(String name) throws Exception {
dispatcher.templates.put(name, generateTemplateSource(name));
}
@Override
protected void putPipeline(String name) throws Exception {
dispatcher.pipelines.put(name, Exporter.emptyPipeline(XContentType.JSON).bytes());
}
@Override
protected void assertTemplateExists(String name) throws Exception {
assertThat("failed to find a template matching [" + name + "]", dispatcher.templates.containsKey(name), is(true));
}
@Override
protected void assertPipelineExists(String name) throws Exception {
assertThat("failed to find a pipeline matching [" + name + "]", dispatcher.pipelines.containsKey(name), is(true));
}
@Override
protected void assertTemplateNotUpdated(String name) throws Exception {
// Checks that no PUT Template request has been made
assertThat(dispatcher.hasRequest("PUT", "/_template/" + name), is(false));
// Checks that the current template exists
assertThat(dispatcher.templates.containsKey(name), is(true));
}
@Override
protected void assertPipelineNotUpdated(String name) throws Exception {
// Checks that no PUT pipeline request has been made
assertThat(dispatcher.hasRequest("PUT", "/_ingest/pipeline/" + name), is(false));
// Checks that the current pipeline exists
assertThat(dispatcher.pipelines.containsKey(name), is(true));
}
@Override
protected void awaitIndexExists(String index) throws Exception {
Runnable busy = () -> assertThat("could not find index " + index, dispatcher.hasIndex(index), is(true));
assertBusy(busy, 10, TimeUnit.SECONDS);
}
class MockServerDispatcher extends Dispatcher {
private final MockResponse NOT_FOUND = newResponse(404, "");
private final Set<String> requests = new HashSet<>();
private final Map<String, BytesReference> templates = ConcurrentCollections.newConcurrentMap();
private final Map<String, BytesReference> pipelines = ConcurrentCollections.newConcurrentMap();
private final Set<String> indices = ConcurrentCollections.newConcurrentSet();
@Override
public MockResponse dispatch(RecordedRequest request) throws InterruptedException {
final String requestLine = request.getRequestLine();
requests.add(requestLine);
// Cluster version
if ("GET / HTTP/1.1".equals(requestLine)) {
return newResponse(200, "{\"version\": {\"number\": \"" + Version.CURRENT.toString() + "\"}}");
// Bulk
} else if ("POST".equals(request.getMethod()) && request.getPath().startsWith("/_bulk")) {
// Parse the bulk request and extract all index names
try {
BulkRequest bulk = new BulkRequest();
byte[] source = request.getBody().readByteArray();
bulk.add(source, 0, source.length);
for (ActionRequest docRequest : bulk.requests()) {
if (docRequest instanceof IndexRequest) {
indices.add(((IndexRequest) docRequest).index());
}
}
} catch (Exception e) {
return newResponse(500, e.getMessage());
}
return newResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
// Templates and Pipelines
} else if ("GET".equals(request.getMethod()) || "PUT".equals(request.getMethod())) {
final String[] paths = request.getPath().split("/");
if (paths.length > 2) {
// Templates
if ("_template".equals(paths[1])) {
// _template/{name}
return newResponseForType(templates, request, paths[2]);
} else if ("_ingest".equals(paths[1])) {
// _ingest/pipeline/{name}
return newResponseForType(pipelines, request, paths[3]);
}
}
}
return newResponse(500, "MockServerDispatcher does not support: " + request.getRequestLine());
}
private MockResponse newResponseForType(Map<String, BytesReference> type, RecordedRequest request, String name) {
final boolean exists = type.containsKey(name);
if ("GET".equals(request.getMethod())) {
return exists ? newResponse(200, type.get(name).utf8ToString()) : NOT_FOUND;
} else if ("PUT".equals(request.getMethod())) {
type.put(name, new BytesArray(request.getMethod()));
return exists ? newResponse(200, "updated") : newResponse(201, "created");
}
return newResponse(500, request.getMethod() + " " + request.getPath() + " is not supported");
}
MockResponse newResponse(int code, String body) {
return new MockResponse().setResponseCode(code).setBody(body);
}
int countRequests(String method, String path) {
int count = 0;
for (String request : requests) {
if (request.startsWith(method + " " + path)) {
count += 1;
}
}
return count;
}
boolean hasRequest(String method, String path) {
return countRequests(method, path) > 0;
}
boolean hasIndex(String index) {
return indices.contains(index);
}
}
}

View File

@ -5,606 +5,422 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.QueueDispatcher;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import okio.Buffer;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.xpack.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStateMonitoringDoc;
import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryMonitoringDoc;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.bulk.MonitoringBulkTimestampedResolver;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.joda.time.format.DateTimeFormat;
import org.junit.After;
import org.junit.Before;
import org.elasticsearch.xpack.monitoring.exporter.Exporter.Config;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.ssl.SSLService;
import org.mockito.InOrder;
import java.io.IOException;
import java.net.BindException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.atMost;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
/**
* Tests {@link HttpExporter}.
*/
public class HttpExporterTests extends ESTestCase {
private final SSLService sslService = mock(SSLService.class);
public void testExporterWithBlacklistedHeaders() {
final String blacklistedHeader = randomFrom(HttpExporter.BLACKLISTED_HEADERS);
final String expected = "[" + blacklistedHeader + "] cannot be overwritten via [xpack.monitoring.exporters._http.headers]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200")
.put("xpack.monitoring.exporters._http.headers.abc", "xyz")
.put("xpack.monitoring.exporters._http.headers." + blacklistedHeader, "value should not matter");
@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
public class HttpExporterTests extends MonitoringIntegTestCase {
private int webPort;
private MockWebServer webServer;
@Before
public void startWebservice() throws Exception {
for (webPort = 9250; webPort < 9300; webPort++) {
try {
webServer = new MockWebServer();
QueueDispatcher dispatcher = new QueueDispatcher();
dispatcher.setFailFast(true);
webServer.setDispatcher(dispatcher);
webServer.start(webPort);
return;
} catch (BindException be) {
logger.warn("port [{}] was already in use trying next port", webPort);
}
}
throw new ElasticsearchException("unable to find open port between 9200 and 9300");
}
@After
public void cleanup() throws Exception {
webServer.shutdown();
}
private int expectedTemplateAndPipelineCalls(final boolean templateAlreadyExists, final boolean pipelineAlreadyExists) {
return expectedTemplateCalls(templateAlreadyExists) + expectedPipelineCalls(pipelineAlreadyExists);
}
private int expectedTemplateCalls(final boolean alreadyExists) {
return monitoringTemplates().size() * (alreadyExists ? 1 : 2);
}
private int expectedPipelineCalls(final boolean alreadyExists) {
return alreadyExists ? 1 : 2;
}
private void assertMonitorVersion(final MockWebServer webServer) throws Exception {
assertMonitorVersion(webServer, null);
}
private void assertMonitorVersion(final MockWebServer webServer, @Nullable final Map<String, String[]> customHeaders)
throws Exception {
RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo("/"));
assertHeaders(request, customHeaders);
}
private void assertMonitorTemplatesAndPipeline(final MockWebServer webServer,
final boolean templateAlreadyExists, final boolean pipelineAlreadyExists)
throws Exception {
assertMonitorTemplatesAndPipeline(webServer, templateAlreadyExists, pipelineAlreadyExists, null);
}
private void assertMonitorTemplatesAndPipeline(final MockWebServer webServer,
final boolean templateAlreadyExists, final boolean pipelineAlreadyExists,
@Nullable final Map<String, String[]> customHeaders) throws Exception {
assertMonitorVersion(webServer, customHeaders);
assertMonitorTemplates(webServer, templateAlreadyExists, customHeaders);
assertMonitorPipelines(webServer, pipelineAlreadyExists, customHeaders);
}
private void assertMonitorTemplates(final MockWebServer webServer, final boolean alreadyExists,
@Nullable final Map<String, String[]> customHeaders) throws Exception {
RecordedRequest request;
for (Map.Entry<String, String> template : monitoringTemplates().entrySet()) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo("/_template/" + template.getKey()));
assertHeaders(request, customHeaders);
if (alreadyExists == false) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("PUT"));
assertThat(request.getPath(), equalTo("/_template/" + template.getKey()));
assertThat(request.getBody().readUtf8(), equalTo(template.getValue()));
assertHeaders(request, customHeaders);
}
}
}
private void assertMonitorPipelines(final MockWebServer webServer, final boolean alreadyExists,
@Nullable final Map<String, String[]> customHeaders) throws Exception {
RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("GET"));
assertThat(request.getPath(), equalTo("/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME));
assertHeaders(request, customHeaders);
if (alreadyExists == false) {
request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("PUT"));
assertThat(request.getPath(), equalTo("/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME));
assertThat(request.getBody().readUtf8(), equalTo(Exporter.emptyPipeline(XContentType.JSON).string()));
assertHeaders(request, customHeaders);
}
}
private RecordedRequest assertBulk(final MockWebServer webServer) throws Exception {
return assertBulk(webServer, -1);
}
private RecordedRequest assertBulk(final MockWebServer webServer, final int docs) throws Exception {
return assertBulk(webServer, docs, null);
}
private RecordedRequest assertBulk(final MockWebServer webServer, final int docs, @Nullable final Map<String, String[]> customHeaders)
throws Exception {
RecordedRequest request = webServer.takeRequest();
assertThat(request.getMethod(), equalTo("POST"));
assertThat(request.getPath(), equalTo("/_bulk?pipeline=" + Exporter.EXPORT_PIPELINE_NAME));
assertHeaders(request, customHeaders);
if (docs != -1) {
assertBulkRequest(request.getBody(), docs);
}
return request;
}
private void assertHeaders(final RecordedRequest request, final Map<String, String[]> customHeaders) {
if (customHeaders != null) {
for (final Map.Entry<String, String[]> entry : customHeaders.entrySet()) {
final String header = entry.getKey();
final String[] values = entry.getValue();
final List<String> headerValues = request.getHeaders().values(header);
assertThat(header, headerValues, hasSize(values.length));
assertThat(header, headerValues, containsInAnyOrder(values));
}
}
}
public void testExport() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters._http.connection.keep_alive", false)
.put("xpack.monitoring.exporters._http.update_mappings", false);
internalCluster().startNode(builder);
final int nbDocs = randomIntBetween(1, 25);
export(newRandomMonitoringDocs(nbDocs));
assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
assertBulk(webServer, nbDocs);
}
public void testExportWithHeaders() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);
final String headerValue = randomAsciiOfLengthBetween(3, 9);
final String[] array = generateRandomStringArray(2, 4, false);
final Map<String, String[]> headers = new HashMap<>();
headers.put("X-Cloud-Cluster", new String[] { headerValue });
headers.put("X-Found-Cluster", new String[] { headerValue });
headers.put("Array-Check", array);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters._http.connection.keep_alive", false)
.put("xpack.monitoring.exporters._http.update_mappings", false)
.put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
.put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
.putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);
internalCluster().startNode(builder);
final int nbDocs = randomIntBetween(1, 25);
export(newRandomMonitoringDocs(nbDocs));
assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
assertBulk(webServer, nbDocs, headers);
}
public void testDynamicHostChange() {
// disable exporting to be able to use non valid hosts
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", "test0");
String nodeName = internalCluster().startNode(builder);
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.putArray("xpack.monitoring.exporters._http.host", "test1")));
assertThat(getExporter(nodeName).hosts, arrayContaining("test1"));
// wipes the non array settings
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.putArray("xpack.monitoring.exporters._http.host", "test2")
.put("xpack.monitoring.exporters._http.host", "")));
assertThat(getExporter(nodeName).hosts, arrayContaining("test2"));
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.putArray("xpack.monitoring.exporters._http.host", "test3")));
assertThat(getExporter(nodeName).hosts, arrayContaining("test3"));
}
public void testHostChangeReChecksTemplate() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters._http.connection.keep_alive", false)
.put("xpack.monitoring.exporters._http.update_mappings", false);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
String agentNode = internalCluster().startNode(builder);
HttpExporter exporter = getExporter(agentNode);
assertThat(exporter.supportedClusterVersion, is(false));
export(Collections.singletonList(newRandomMonitoringDoc()));
assertThat(exporter.supportedClusterVersion, is(true));
assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
assertBulk(webServer);
MockWebServer secondWebServer = null;
int secondWebPort;
try {
final int expectedPipelineCalls = expectedPipelineCalls(!pipelineExistsAlready);
for (secondWebPort = 9250; secondWebPort < 9300; secondWebPort++) {
try {
secondWebServer = new MockWebServer();
QueueDispatcher dispatcher = new QueueDispatcher();
dispatcher.setFailFast(true);
secondWebServer.setDispatcher(dispatcher);
secondWebServer.start(secondWebPort);
break;
} catch (BindException be) {
logger.warn("port [{}] was already in use trying next port", secondWebPort);
}
}
assertNotNull("Unable to start the second mock web server", secondWebServer);
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
Settings.builder().putArray("xpack.monitoring.exporters._http.host",
secondWebServer.getHostName() + ":" + secondWebServer.getPort())).get());
// a new exporter is created on update, so we need to re-fetch it
exporter = getExporter(agentNode);
enqueueGetClusterVersionResponse(secondWebServer, Version.CURRENT);
for (String template : monitoringTemplates().keySet()) {
if (template.contains(MonitoringBulkTimestampedResolver.Data.DATA)) {
enqueueResponse(secondWebServer, 200, "template [" + template + "] exists");
} else {
enqueueResponse(secondWebServer, 404, "template [" + template + "] does not exist");
enqueueResponse(secondWebServer, 201, "template [" + template + "] created");
}
}
enqueuePipelineResponses(secondWebServer, !pipelineExistsAlready);
enqueueResponse(secondWebServer, 200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
logger.info("--> exporting a second event");
export(Collections.singletonList(newRandomMonitoringDoc()));
assertThat(secondWebServer.getRequestCount(), equalTo(2 + monitoringTemplates().size() * 2 - 1 + expectedPipelineCalls));
assertMonitorVersion(secondWebServer);
for (Map.Entry<String, String> template : monitoringTemplates().entrySet()) {
RecordedRequest recordedRequest = secondWebServer.takeRequest();
assertThat(recordedRequest.getMethod(), equalTo("GET"));
assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.getKey()));
if (template.getKey().contains(MonitoringBulkTimestampedResolver.Data.DATA) == false) {
recordedRequest = secondWebServer.takeRequest();
assertThat(recordedRequest.getMethod(), equalTo("PUT"));
assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.getKey()));
assertThat(recordedRequest.getBody().readUtf8(), equalTo(template.getValue()));
}
}
assertMonitorPipelines(secondWebServer, !pipelineExistsAlready, null);
assertBulk(secondWebServer);
} finally {
if (secondWebServer != null) {
secondWebServer.shutdown();
}
}
}
public void testUnsupportedClusterVersion() throws Exception {
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters._http.connection.keep_alive", false);
// returning an unsupported cluster version
enqueueGetClusterVersionResponse(randomFrom(Version.fromString("0.18.0"), Version.fromString("1.0.0"),
Version.fromString("1.4.0")));
String agentNode = internalCluster().startNode(builder);
HttpExporter exporter = getExporter(agentNode);
assertThat(exporter.supportedClusterVersion, is(false));
assertNull(exporter.openBulk());
assertThat(exporter.supportedClusterVersion, is(false));
assertThat(webServer.getRequestCount(), equalTo(1));
assertMonitorVersion(webServer);
}
public void testDynamicIndexFormatChange() throws Exception {
final boolean templatesExistsAlready = randomBoolean();
final boolean pipelineExistsAlready = randomBoolean();
final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters._http.connection.keep_alive", false)
.put("xpack.monitoring.exporters._http.update_mappings", false);
String agentNode = internalCluster().startNode(builder);
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
HttpExporter exporter = getExporter(agentNode);
MonitoringDoc doc = newRandomMonitoringDoc();
export(Collections.singletonList(doc));
final int expectedRequests = 2 + expectedTemplateAndPipelineCalls;
assertThat(webServer.getRequestCount(), equalTo(expectedRequests));
assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
RecordedRequest recordedRequest = assertBulk(webServer);
String indexName = exporter.getResolvers().getResolver(doc).index(doc);
byte[] bytes = recordedRequest.getBody().readByteArray();
Map<String, Object> data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
Map<String, Object> index = (Map<String, Object>) data.get("index");
assertThat(index.get("_index"), equalTo(indexName));
String newTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM");
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.put("xpack.monitoring.exporters._http.index.name.time_format", newTimeFormat)));
enqueueGetClusterVersionResponse(Version.CURRENT);
enqueueTemplateAndPipelineResponses(webServer, true, true);
enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
doc = newRandomMonitoringDoc();
export(Collections.singletonList(doc));
String expectedMonitoringIndex = ".monitoring-es-" + MonitoringTemplateUtils.TEMPLATE_VERSION + "-"
+ DateTimeFormat.forPattern(newTimeFormat).withZoneUTC().print(doc.getTimestamp());
final int expectedTemplatesAndPipelineExists = expectedTemplateAndPipelineCalls(true, true);
assertThat(webServer.getRequestCount(), equalTo(expectedRequests + 2 + expectedTemplatesAndPipelineExists));
assertMonitorTemplatesAndPipeline(webServer, true, true);
recordedRequest = assertBulk(webServer);
bytes = recordedRequest.getBody().readByteArray();
data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
index = (Map<String, Object>) data.get("index");
assertThat(index.get("_index"), equalTo(expectedMonitoringIndex));
}
public void testLoadRemoteClusterVersion() throws IOException {
final String host = webServer.getHostName() + ":" + webServer.getPort();
Settings.Builder builder = Settings.builder()
.put(MonitoringSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", host)
.put("xpack.monitoring.exporters._http.connection.keep_alive", false);
String agentNode = internalCluster().startNode(builder);
HttpExporter exporter = getExporter(agentNode);
enqueueGetClusterVersionResponse(Version.CURRENT);
Version resolved = exporter.loadRemoteClusterVersion(host);
assertTrue(resolved.equals(Version.CURRENT));
final Version expected = randomFrom(Version.CURRENT, Version.V_2_0_0_beta1, Version.V_2_0_0_beta2, Version.V_2_0_0_rc1,
Version.V_2_0_0, Version.V_2_1_0, Version.V_2_2_0, Version.V_2_3_0);
enqueueGetClusterVersionResponse(expected);
resolved = exporter.loadRemoteClusterVersion(host);
assertTrue(resolved.equals(expected));
}
private void export(Collection<MonitoringDoc> docs) throws Exception {
Exporters exporters = internalCluster().getInstance(Exporters.class);
assertThat(exporters, notNullValue());
// Wait for exporting bulks to be ready to export
assertBusy(() -> exporters.forEach(exporter -> assertThat(exporter.openBulk(), notNullValue())));
exporters.export(docs);
}
private HttpExporter getExporter(String nodeName) {
Exporters exporters = internalCluster().getInstance(Exporters.class, nodeName);
return (HttpExporter) exporters.iterator().next();
}
private MonitoringDoc newRandomMonitoringDoc() {
if (randomBoolean()) {
IndexRecoveryMonitoringDoc doc = new IndexRecoveryMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
doc.setClusterUUID(internalCluster().getClusterName());
doc.setTimestamp(System.currentTimeMillis());
doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
doc.setRecoveryResponse(new RecoveryResponse());
return doc;
builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
}
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(), equalTo(expected));
}
public void testExporterWithEmptyHeaders() {
final String name = randomFrom("abc", "ABC", "X-Flag");
final String expected = "headers must have values, missing for setting [xpack.monitoring.exporters._http.headers." + name + "]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "localhost:9200")
.put("xpack.monitoring.exporters._http.headers." + name, "");
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
}
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(), equalTo(expected));
}
public void testExporterWithPasswordButNoUsername() {
final String expected =
"[xpack.monitoring.exporters._http.auth.password] without [xpack.monitoring.exporters._http.auth.username]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "localhost:9200")
.put("xpack.monitoring.exporters._http.auth.password", "_pass");
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(), equalTo(expected));
}
public void testExporterWithMissingHost() {
// forgot host!
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.host", "");
} else if (randomBoolean()) {
builder.putArray("xpack.monitoring.exporters._http.host");
} else if (randomBoolean()) {
builder.putNull("xpack.monitoring.exporters._http.host");
}
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(), equalTo("missing required setting [xpack.monitoring.exporters._http.host]"));
}
public void testExporterWithInconsistentSchemes() {
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.putArray("xpack.monitoring.exporters._http.host", "http://localhost:9200", "https://localhost:9201");
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(),
equalTo("[xpack.monitoring.exporters._http.host] must use a consistent scheme: http or https"));
}
public void testExporterWithInvalidHost() {
final String invalidHost = randomFrom("://localhost:9200", "gopher!://xyz.my.com");
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);
// sometimes add a valid URL with it
if (randomBoolean()) {
if (randomBoolean()) {
builder.putArray("xpack.monitoring.exporters._http.host", "localhost:9200", invalidHost);
} else {
builder.putArray("xpack.monitoring.exporters._http.host", invalidHost, "localhost:9200");
}
} else {
ClusterStateMonitoringDoc doc = new ClusterStateMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
doc.setClusterUUID(internalCluster().getClusterName());
doc.setTimestamp(System.currentTimeMillis());
doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
doc.setClusterState(ClusterState.PROTO);
doc.setStatus(ClusterHealthStatus.GREEN);
return doc;
builder.put("xpack.monitoring.exporters._http.host", invalidHost);
}
final Config config = createConfig(builder.build());
final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));
assertThat(exception.getMessage(), equalTo("[xpack.monitoring.exporters._http.host] invalid host: [" + invalidHost + "]"));
}
public void testExporterWithHostOnly() throws Exception {
final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
when(sslService.sslIOSessionStrategy(any(Settings.class))).thenReturn(sslStrategy);
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200");
final Config config = createConfig(builder.build());
new HttpExporter(config, sslService).close();
}
public void testCreateRestClient() throws IOException {
final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
when(sslService.sslIOSessionStrategy(any(Settings.class))).thenReturn(sslStrategy);
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200");
// use basic auth
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.auth.username", "_user")
.put("xpack.monitoring.exporters._http.auth.password", "_pass");
}
// use headers
if (randomBoolean()) {
builder.put("xpack.monitoring.exporters._http.headers.abc", "xyz");
}
final Config config = createConfig(builder.build());
final NodeFailureListener listener = mock(NodeFailureListener.class);
// doesn't explode
HttpExporter.createRestClient(config, sslService, listener).close();
}
public void testCreateSnifferDisabledByDefault() {
final Config config = createConfig(Settings.EMPTY);
final RestClient client = mock(RestClient.class);
final NodeFailureListener listener = mock(NodeFailureListener.class);
assertThat(HttpExporter.createSniffer(config, client, listener), nullValue());
verifyZeroInteractions(client, listener);
}
public void testCreateSnifferWithoutHosts() {
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.sniff.enabled", true);
final Config config = createConfig(builder.build());
final RestClient client = mock(RestClient.class);
final NodeFailureListener listener = mock(NodeFailureListener.class);
expectThrows(IndexOutOfBoundsException.class, () -> HttpExporter.createSniffer(config, client, listener));
}
public void testCreateSniffer() throws IOException {
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http")
// it's a simple check: does it start with "https"?
.put("xpack.monitoring.exporters._http.host", randomFrom("neither", "http", "https"))
.put("xpack.monitoring.exporters._http.sniff.enabled", true);
final Config config = createConfig(builder.build());
final RestClient client = mock(RestClient.class);
final NodeFailureListener listener = mock(NodeFailureListener.class);
final Response response = mock(Response.class);
final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON);
when(response.getEntity()).thenReturn(entity);
when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response);
try (final Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) {
assertThat(sniffer, not(nullValue()));
verify(listener).setSniffer(sniffer);
}
// it's a race whether it triggers this at all
verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class));
verifyNoMoreInteractions(client, listener);
}
public void testCreateResources() {
final boolean useIngest = randomBoolean();
final TimeValue templateTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);
final TimeValue pipelineTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http");
if (useIngest == false) {
builder.put("xpack.monitoring.exporters._http.use_ingest", false);
}
if (templateTimeout != null) {
builder.put("xpack.monitoring.exporters._http.index.template.master_timeout", templateTimeout.toString());
}
// note: this shouldn't get used with useIngest == false, but it doesn't hurt to try to cause issues
if (pipelineTimeout != null) {
builder.put("xpack.monitoring.exporters._http.index.pipeline.master_timeout", pipelineTimeout.toString());
}
final Config config = createConfig(builder.build());
final MultiHttpResource multiResource = HttpExporter.createResources(config, new ResolversRegistry(config.settings()));
final List<HttpResource> resources = multiResource.getResources();
final int version = (int)resources.stream().filter((resource) -> resource instanceof VersionHttpResource).count();
final List<TemplateHttpResource> templates =
resources.stream().filter((resource) -> resource instanceof TemplateHttpResource)
.map(TemplateHttpResource.class::cast)
.collect(Collectors.toList());
final List<PipelineHttpResource> pipelines =
resources.stream().filter((resource) -> resource instanceof PipelineHttpResource)
.map(PipelineHttpResource.class::cast)
.collect(Collectors.toList());
// expected number of resources
assertThat(multiResource.getResources().size(), equalTo(version + templates.size() + pipelines.size()));
assertThat(version, equalTo(1));
assertThat(templates, hasSize(3));
assertThat(pipelines, hasSize(useIngest ? 1 : 0));
// timeouts
assertMasterTimeoutSet(templates, templateTimeout);
assertMasterTimeoutSet(pipelines, pipelineTimeout);
// logging owner names
final List<String> uniqueOwners =
resources.stream().map(HttpResource::getResourceOwnerName).distinct().collect(Collectors.toList());
assertThat(uniqueOwners, hasSize(1));
assertThat(uniqueOwners.get(0), equalTo("xpack.monitoring.exporters._http"));
}
public void testCreateDefaultParams() {
final TimeValue bulkTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);
final boolean useIngest = randomBoolean();
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", "http");
if (bulkTimeout != null) {
builder.put("xpack.monitoring.exporters._http.bulk.timeout", bulkTimeout.toString());
}
if (useIngest == false) {
builder.put("xpack.monitoring.exporters._http.use_ingest", false);
}
final Config config = createConfig(builder.build());
final Map<String, String> parameters = new HashMap<>(HttpExporter.createDefaultParams(config));
assertThat(parameters.remove("filter_path"), equalTo("errors,items.*.error"));
if (bulkTimeout != null) {
assertThat(parameters.remove("master_timeout"), equalTo(bulkTimeout.toString()));
}
if (useIngest) {
assertThat(parameters.remove("pipeline"), equalTo(Exporter.EXPORT_PIPELINE_NAME));
}
// should have removed everything
assertThat(parameters.size(), equalTo(0));
}
public void testHttpExporterDirtyResourcesBlock() throws Exception {
final Config config = createConfig(Settings.EMPTY);
final RestClient client = mock(RestClient.class);
final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
final NodeFailureListener listener = mock(NodeFailureListener.class);
final ResolversRegistry resolvers = mock(ResolversRegistry.class);
final HttpResource resource = new MockHttpResource(exporterName(), true, PublishableHttpResource.CheckResponse.ERROR, false);
try (final HttpExporter exporter = new HttpExporter(config, client, sniffer, listener, resolvers, resource)) {
verify(listener).setResource(resource);
assertThat(exporter.openBulk(), nullValue());
}
}
private List<MonitoringDoc> newRandomMonitoringDocs(int nb) {
    List<MonitoringDoc> docs = new ArrayList<>(nb);
    for (int i = 0; i < nb; i++) {
        docs.add(newRandomMonitoringDoc());
    }
    return docs;
}
public void testHttpExporter() throws Exception {
    final Config config = createConfig(Settings.EMPTY);
    final RestClient client = mock(RestClient.class);
    final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
    final NodeFailureListener listener = mock(NodeFailureListener.class);
    final ResolversRegistry resolvers = mock(ResolversRegistry.class);
    // sometimes dirty to start with and sometimes not; but always succeeds on checkAndPublish
    final HttpResource resource = new MockHttpResource(exporterName(), randomBoolean());
    try (final HttpExporter exporter = new HttpExporter(config, client, sniffer, listener, resolvers, resource)) {
        verify(listener).setResource(resource);
        final HttpExportBulk bulk = exporter.openBulk();
        assertThat(bulk.getName(), equalTo(exporterName()));
    }
}
private void enqueueGetClusterVersionResponse(Version v) throws IOException {
enqueueGetClusterVersionResponse(webServer, v);
}
private void enqueueGetClusterVersionResponse(MockWebServer mockWebServer, Version v) throws IOException {
    mockWebServer.enqueue(new MockResponse().setResponseCode(200).setBody(
        jsonBuilder().startObject().startObject("version").field("number", v.toString()).endObject().endObject().bytes()
            .utf8ToString()));
}
public void testHttpExporterShutdown() throws Exception {
    final Config config = createConfig(Settings.EMPTY);
    final RestClient client = mock(RestClient.class);
    final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
    final NodeFailureListener listener = mock(NodeFailureListener.class);
    final ResolversRegistry resolvers = mock(ResolversRegistry.class);
    final MultiHttpResource resource = mock(MultiHttpResource.class);
    if (sniffer != null && rarely()) {
        doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(sniffer).close();
    }
    if (rarely()) {
        doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(client).close();
    }
    new HttpExporter(config, client, sniffer, listener, resolvers, resource).close();
    // order matters; sniffer must close first
    if (sniffer != null) {
        final InOrder inOrder = inOrder(sniffer, client);
        inOrder.verify(sniffer).close();
        inOrder.verify(client).close();
    } else {
        verify(client).close();
    }
}
private void enqueueTemplateAndPipelineResponses(final MockWebServer webServer,
                                                 final boolean templatesAlreadyExists, final boolean pipelineAlreadyExists)
        throws IOException {
    enqueueTemplateResponses(webServer, templatesAlreadyExists);
    enqueuePipelineResponses(webServer, pipelineAlreadyExists);
}
private void enqueueTemplateResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
    if (alreadyExists) {
        enqueueTemplateResponsesExistsAlready(webServer);
    } else {
        enqueueTemplateResponsesDoesNotExistYet(webServer);
    }
}
private void enqueueTemplateResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
    for (String template : monitoringTemplates().keySet()) {
        enqueueResponse(webServer, 404, "template [" + template + "] does not exist");
        enqueueResponse(webServer, 201, "template [" + template + "] created");
    }
}
private void assertMasterTimeoutSet(final List<? extends PublishableHttpResource> resources, final TimeValue timeout) {
if (timeout != null) {
for (final PublishableHttpResource resource : resources) {
assertThat(resource.getParameters().get("master_timeout"), equalTo(timeout.toString()));
}
}
}
private void enqueueTemplateResponsesExistsAlready(final MockWebServer webServer) throws IOException {
for (String template : monitoringTemplates().keySet()) {
enqueueResponse(webServer, 200, "template [" + template + "] exists");
    }
}
/**
* Create the {@link Config} named "_http" and select those settings from {@code settings}.
*
* @param settings The settings to select the exporter's settings from
* @return Never {@code null}.
*/
private static Config createConfig(Settings settings) {
return new Config("_http", HttpExporter.TYPE, settings.getAsSettings(exporterName()));
}
private void enqueuePipelineResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
if (alreadyExists) {
enqueuePipelineResponsesExistsAlready(webServer);
} else {
enqueuePipelineResponsesDoesNotExistYet(webServer);
    }
}
private static String exporterName() {
return "xpack.monitoring.exporters._http";
}
private void enqueuePipelineResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
enqueueResponse(webServer, 404, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] does not exist");
enqueueResponse(webServer, 201, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] created");
}
private void enqueuePipelineResponsesExistsAlready(final MockWebServer webServer) throws IOException {
enqueueResponse(webServer, 200, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] exists");
}
private void enqueueResponse(int responseCode, String body) throws IOException {
enqueueResponse(webServer, responseCode, body);
}
private void enqueueResponse(MockWebServer mockWebServer, int responseCode, String body) throws IOException {
mockWebServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body));
}
private void assertBulkRequest(Buffer requestBody, int numberOfActions) throws Exception {
BulkRequest bulkRequest = Requests.bulkRequest().add(new BytesArray(requestBody.readByteArray()), null, null);
assertThat(bulkRequest.numberOfActions(), equalTo(numberOfActions));
for (ActionRequest actionRequest : bulkRequest.requests()) {
assertThat(actionRequest, instanceOf(IndexRequest.class));
}
}
}
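For orientation, here is a minimal sketch of the exporter settings these unit tests exercise. The setting keys mirror the test fixtures above; the host value is purely an illustrative assumption and not part of this change.
// Illustrative only: keys mirror those used in the tests above; the host value
// is an assumed placeholder rather than part of this change.
Settings exporterSettings = Settings.builder()
    .put("xpack.monitoring.exporters._http.type", "http")
    .put("xpack.monitoring.exporters._http.host", "https://monitoring.example.org:9200") // assumed
    .put("xpack.monitoring.exporters._http.use_ingest", false)
    .put("xpack.monitoring.exporters._http.bulk.timeout", "30s")
    .put("xpack.monitoring.exporters._http.index.template.master_timeout", "30s")
    .put("xpack.monitoring.exporters._http.sniff.enabled", true)
    .build();
Config config = createConfig(exporterSettings); // as in the tests above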

View File

@ -1,75 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.test.ESTestCase;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import static org.hamcrest.CoreMatchers.equalTo;
public class HttpExporterUtilsTests extends ESTestCase {
public void testHostParsing() throws MalformedURLException, URISyntaxException {
URL url = HttpExporterUtils.parseHostWithPath("localhost:9200", "");
verifyUrl(url, "http", "localhost", 9200, "/");
url = HttpExporterUtils.parseHostWithPath("localhost", "_bulk");
verifyUrl(url, "http", "localhost", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("http://localhost:9200", "_bulk");
verifyUrl(url, "http", "localhost", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("http://localhost", "_bulk");
verifyUrl(url, "http", "localhost", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("https://localhost:9200", "_bulk");
verifyUrl(url, "https", "localhost", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("https://boaz-air.local:9200", "_bulk");
verifyUrl(url, "https", "boaz-air.local", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("localhost:9200/suburl", "");
verifyUrl(url, "http", "localhost", 9200, "/suburl/");
url = HttpExporterUtils.parseHostWithPath("localhost/suburl", "_bulk");
verifyUrl(url, "http", "localhost", 9200, "/suburl/_bulk");
url = HttpExporterUtils.parseHostWithPath("http://localhost:9200/suburl/suburl1", "_bulk");
verifyUrl(url, "http", "localhost", 9200, "/suburl/suburl1/_bulk");
url = HttpExporterUtils.parseHostWithPath("https://localhost:9200/suburl", "_bulk");
verifyUrl(url, "https", "localhost", 9200, "/suburl/_bulk");
url = HttpExporterUtils.parseHostWithPath("https://server_with_underscore:9300", "_bulk");
verifyUrl(url, "https", "server_with_underscore", 9300, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("server_with_underscore:9300", "_bulk");
verifyUrl(url, "http", "server_with_underscore", 9300, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("server_with_underscore", "_bulk");
verifyUrl(url, "http", "server_with_underscore", 9200, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("https://server-dash:9300", "_bulk");
verifyUrl(url, "https", "server-dash", 9300, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("server-dash:9300", "_bulk");
verifyUrl(url, "http", "server-dash", 9300, "/_bulk");
url = HttpExporterUtils.parseHostWithPath("server-dash", "_bulk");
verifyUrl(url, "http", "server-dash", 9200, "/_bulk");
}
void verifyUrl(URL url, String protocol, String host, int port, String path) throws URISyntaxException {
assertThat(url.getProtocol(), equalTo(protocol));
assertThat(url.getHost(), equalTo(host));
assertThat(url.getPort(), equalTo(port));
assertThat(url.toURI().getPath(), equalTo(path));
}
}

View File

@ -0,0 +1,171 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests {@link HttpHostBuilder}.
*/
public class HttpHostBuilderTests extends ESTestCase {
private final Scheme scheme = randomFrom(Scheme.values());
private final String hostname = randomAsciiOfLengthBetween(1, 20);
private final int port = randomIntBetween(1, 65535);
public void testBuilder() {
assertHttpHost(HttpHostBuilder.builder(hostname), Scheme.HTTP, hostname, 9200);
assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname), scheme, hostname, 9200);
assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname + ":" + port), scheme, hostname, port);
// weird port, but I don't expect it to explode
assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname + ":-1"), scheme, hostname, 9200);
// port without scheme
assertHttpHost(HttpHostBuilder.builder(hostname + ":" + port), Scheme.HTTP, hostname, port);
// fairly ordinary
assertHttpHost(HttpHostBuilder.builder("localhost"), Scheme.HTTP, "localhost", 9200);
assertHttpHost(HttpHostBuilder.builder("localhost:9200"), Scheme.HTTP, "localhost", 9200);
assertHttpHost(HttpHostBuilder.builder("http://localhost"), Scheme.HTTP, "localhost", 9200);
assertHttpHost(HttpHostBuilder.builder("http://localhost:9200"), Scheme.HTTP, "localhost", 9200);
assertHttpHost(HttpHostBuilder.builder("https://localhost:9200"), Scheme.HTTPS, "localhost", 9200);
assertHttpHost(HttpHostBuilder.builder("https://boaz-air.local:9200"), Scheme.HTTPS, "boaz-air.local", 9200);
assertHttpHost(HttpHostBuilder.builder("https://server-dash:19200"), Scheme.HTTPS, "server-dash", 19200);
assertHttpHost(HttpHostBuilder.builder("server-dash:19200"), Scheme.HTTP, "server-dash", 19200);
assertHttpHost(HttpHostBuilder.builder("server-dash"), Scheme.HTTP, "server-dash", 9200);
assertHttpHost(HttpHostBuilder.builder("sub.domain"), Scheme.HTTP, "sub.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("http://sub.domain"), Scheme.HTTP, "sub.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("http://sub.domain:9200"), Scheme.HTTP, "sub.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("https://sub.domain:9200"), Scheme.HTTPS, "sub.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("https://sub.domain:19200"), Scheme.HTTPS, "sub.domain", 19200);
// ipv4
assertHttpHost(HttpHostBuilder.builder("127.0.0.1"), Scheme.HTTP, "127.0.0.1", 9200);
assertHttpHost(HttpHostBuilder.builder("127.0.0.1:19200"), Scheme.HTTP, "127.0.0.1", 19200);
assertHttpHost(HttpHostBuilder.builder("http://127.0.0.1"), Scheme.HTTP, "127.0.0.1", 9200);
assertHttpHost(HttpHostBuilder.builder("http://127.0.0.1:9200"), Scheme.HTTP, "127.0.0.1", 9200);
assertHttpHost(HttpHostBuilder.builder("https://127.0.0.1:9200"), Scheme.HTTPS, "127.0.0.1", 9200);
assertHttpHost(HttpHostBuilder.builder("https://127.0.0.1:19200"), Scheme.HTTPS, "127.0.0.1", 19200);
// ipv6
assertHttpHost(HttpHostBuilder.builder("[::1]"), Scheme.HTTP, "[::1]", 9200);
assertHttpHost(HttpHostBuilder.builder("[::1]:19200"), Scheme.HTTP, "[::1]", 19200);
assertHttpHost(HttpHostBuilder.builder("http://[::1]"), Scheme.HTTP, "[::1]", 9200);
assertHttpHost(HttpHostBuilder.builder("http://[::1]:9200"), Scheme.HTTP, "[::1]", 9200);
assertHttpHost(HttpHostBuilder.builder("https://[::1]:9200"), Scheme.HTTPS, "[::1]", 9200);
assertHttpHost(HttpHostBuilder.builder("https://[::1]:19200"), Scheme.HTTPS, "[::1]", 19200);
assertHttpHost(HttpHostBuilder.builder("[fdda:5cc1:23:4::1f]"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
assertHttpHost(HttpHostBuilder.builder("http://[fdda:5cc1:23:4::1f]"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
assertHttpHost(HttpHostBuilder.builder("http://[fdda:5cc1:23:4::1f]:9200"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
assertHttpHost(HttpHostBuilder.builder("https://[fdda:5cc1:23:4::1f]:9200"), Scheme.HTTPS, "[fdda:5cc1:23:4::1f]", 9200);
assertHttpHost(HttpHostBuilder.builder("https://[fdda:5cc1:23:4::1f]:19200"), Scheme.HTTPS, "[fdda:5cc1:23:4::1f]", 19200);
// underscores
assertHttpHost(HttpHostBuilder.builder("server_with_underscore"), Scheme.HTTP, "server_with_underscore", 9200);
assertHttpHost(HttpHostBuilder.builder("server_with_underscore:19200"), Scheme.HTTP, "server_with_underscore", 19200);
assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore"), Scheme.HTTP, "server_with_underscore", 9200);
assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore:9200"), Scheme.HTTP, "server_with_underscore", 9200);
assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore:19200"), Scheme.HTTP, "server_with_underscore", 19200);
assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore"), Scheme.HTTPS, "server_with_underscore", 9200);
assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore:9200"), Scheme.HTTPS, "server_with_underscore", 9200);
assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore:19200"), Scheme.HTTPS, "server_with_underscore", 19200);
assertHttpHost(HttpHostBuilder.builder("_prefix.domain"), Scheme.HTTP, "_prefix.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("_prefix.domain:19200"), Scheme.HTTP, "_prefix.domain", 19200);
assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain"), Scheme.HTTP, "_prefix.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain:9200"), Scheme.HTTP, "_prefix.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain:19200"), Scheme.HTTP, "_prefix.domain", 19200);
assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain"), Scheme.HTTPS, "_prefix.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain:9200"), Scheme.HTTPS, "_prefix.domain", 9200);
assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain:19200"), Scheme.HTTPS, "_prefix.domain", 19200);
}
public void testManualBuilder() {
assertHttpHost(HttpHostBuilder.builder().host(hostname), Scheme.HTTP, hostname, 9200);
assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname), scheme, hostname, 9200);
assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname).port(port), scheme, hostname, port);
// unset the port (not normal, but ensuring it works)
assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname).port(port).port(-1), scheme, hostname, 9200);
// port without scheme
assertHttpHost(HttpHostBuilder.builder().host(hostname).port(port), Scheme.HTTP, hostname, port);
}
public void testBuilderNullUri() {
final NullPointerException e = expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder(null));
assertThat(e.getMessage(), equalTo("uri must not be null"));
}
public void testUnknownScheme() {
assertBuilderBadSchemeThrows("htp://localhost:9200", "htp");
assertBuilderBadSchemeThrows("htttp://localhost:9200", "htttp");
assertBuilderBadSchemeThrows("httpd://localhost:9200", "httpd");
assertBuilderBadSchemeThrows("ws://localhost:9200", "ws");
assertBuilderBadSchemeThrows("wss://localhost:9200", "wss");
assertBuilderBadSchemeThrows("ftp://localhost:9200", "ftp");
assertBuilderBadSchemeThrows("gopher://localhost:9200", "gopher");
assertBuilderBadSchemeThrows("localhost://9200", "localhost");
}
public void testPathIsBlocked() {
assertBuilderPathThrows("http://localhost:9200/", "/");
assertBuilderPathThrows("http://localhost:9200/sub", "/sub");
assertBuilderPathThrows("http://localhost:9200/sub/path", "/sub/path");
}
public void testBuildWithoutHost() {
final IllegalStateException e = expectThrows(IllegalStateException.class, () -> HttpHostBuilder.builder().build());
assertThat(e.getMessage(), equalTo("host must be set"));
}
public void testNullScheme() {
expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder().scheme(null));
}
public void testNullHost() {
expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder().host(null));
}
public void testBadPort() {
assertPortThrows(0);
assertPortThrows(65536);
assertPortThrows(randomIntBetween(Integer.MIN_VALUE, -2));
assertPortThrows(randomIntBetween(65537, Integer.MAX_VALUE));
}
private void assertHttpHost(final HttpHostBuilder host, final Scheme scheme, final String hostname, final int port) {
assertHttpHost(host.build(), scheme, hostname, port);
}
private void assertHttpHost(final HttpHost host, final Scheme scheme, final String hostname, final int port) {
assertThat(host.getSchemeName(), equalTo(scheme.toString()));
assertThat(host.getHostName(), equalTo(hostname));
assertThat(host.getPort(), equalTo(port));
}
private void assertBuilderPathThrows(final String uri, final String path) {
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder(uri));
assertThat(e.getMessage(), containsString("[" + path + "]"));
}
private void assertBuilderBadSchemeThrows(final String uri, final String scheme) {
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder(uri));
assertThat(e.getMessage(), containsString(scheme));
}
private void assertPortThrows(final int port) {
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder().port(port));
assertThat(e.getMessage(), containsString(Integer.toString(port)));
}
}
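As a quick reference, the parsing rules asserted above can be summarized with a short sketch. The host names are arbitrary examples, and only the methods exercised by the tests (builder, scheme, host, port, build) are assumed.
// Minimal sketch of the behaviour the tests above assert: the scheme defaults
// to http and the port defaults to 9200 whenever they are omitted.
HttpHost plain = HttpHostBuilder.builder("localhost").build();               // http://localhost:9200
HttpHost secure = HttpHostBuilder.builder("https://10.1.2.3:19200").build(); // https://10.1.2.3:19200
HttpHost manual = HttpHostBuilder.builder()
    .scheme(Scheme.HTTPS)
    .host("monitoring.example.org") // hypothetical hostname
    .port(9243)
    .build();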

View File

@ -0,0 +1,129 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;
import java.util.function.Supplier;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Tests {@link HttpResource}.
*/
public class HttpResourceTests extends ESTestCase {
private final String owner = getTestName();
private final RestClient client = mock(RestClient.class);
public void testConstructorRequiresOwner() {
expectThrows(NullPointerException.class, () -> new HttpResource(null) {
@Override
protected boolean doCheckAndPublish(RestClient client) {
return false;
}
});
}
public void testConstructor() {
final HttpResource resource = new HttpResource(owner) {
@Override
protected boolean doCheckAndPublish(RestClient client) {
return false;
}
};
assertSame(owner, resource.resourceOwnerName);
assertTrue(resource.isDirty());
}
public void testConstructorDirtiness() {
final boolean dirty = randomBoolean();
final HttpResource resource = new HttpResource(owner, dirty) {
@Override
protected boolean doCheckAndPublish(RestClient client) {
return false;
}
};
assertSame(owner, resource.resourceOwnerName);
assertEquals(dirty, resource.isDirty());
}
public void testDirtiness() {
// MockHttpResource always succeeds for checkAndPublish
final HttpResource resource = new MockHttpResource(owner);
assertTrue(resource.isDirty());
resource.markDirty();
assertTrue(resource.isDirty());
// if this fails, then the mocked resource needs to be fixed
assertTrue(resource.checkAndPublish(client));
assertFalse(resource.isDirty());
}
public void testCheckAndPublish() {
final boolean expected = randomBoolean();
// the default dirtiness should be irrelevant; it should always be run!
final HttpResource resource = new HttpResource(owner) {
@Override
protected boolean doCheckAndPublish(final RestClient client) {
return expected;
}
};
assertEquals(expected, resource.checkAndPublish(client));
}
public void testCheckAndPublishEvenWhenDirty() {
final Supplier<Boolean> supplier = mock(Supplier.class);
when(supplier.get()).thenReturn(true, false);
final HttpResource resource = new HttpResource(owner) {
@Override
protected boolean doCheckAndPublish(final RestClient client) {
return supplier.get();
}
};
assertTrue(resource.isDirty());
assertTrue(resource.checkAndPublish(client));
assertFalse(resource.isDirty());
assertFalse(resource.checkAndPublish(client));
verify(supplier, times(2)).get();
}
public void testCheckAndPublishIfDirty() {
@SuppressWarnings("unchecked")
final Supplier<Boolean> supplier = mock(Supplier.class);
when(supplier.get()).thenReturn(true, false);
final HttpResource resource = new HttpResource(owner) {
@Override
protected boolean doCheckAndPublish(final RestClient client) {
return supplier.get();
}
};
assertTrue(resource.isDirty());
assertTrue(resource.checkAndPublishIfDirty(client));
assertFalse(resource.isDirty());
assertTrue(resource.checkAndPublishIfDirty(client));
// once is the default!
verify(supplier).get();
}
}
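A hedged sketch of the lifecycle these tests cover, assuming only the methods exercised above: resources start dirty, checkAndPublishIfDirty only does work while dirty, checkAndPublish always does the work, and a successful run clears the dirty flag.
// Sketch only: a trivial resource whose check/publish round-trip always succeeds.
RestClient client = mock(RestClient.class);
HttpResource resource = new HttpResource("sketch-owner") {
    @Override
    protected boolean doCheckAndPublish(final RestClient client) {
        return true; // pretend the check and publish both succeeded
    }
};
assert resource.isDirty();                      // new resources start dirty
assert resource.checkAndPublishIfDirty(client); // runs because it is dirty
assert resource.isDirty() == false;             // success clears the flag
assert resource.checkAndPublishIfDirty(client); // skips the work, still reports success
resource.markDirty();                           // e.g., after a node failure
assert resource.checkAndPublish(client);        // always performs the full check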

View File

@ -0,0 +1,118 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Collections;
import java.util.Map;
/**
* {@code MockHttpResource} allows the {@linkplain HttpResource#isDirty() dirtiness}, check response, and publish response to be defaulted.
*/
public class MockHttpResource extends PublishableHttpResource {
public final CheckResponse check;
public final boolean publish;
public int checked = 0;
public int published = 0;
/**
* Create a new {@link MockHttpResource} that starts dirty, but always succeeds.
*
* @param resourceOwnerName The user-recognizable name
*/
public MockHttpResource(final String resourceOwnerName) {
this(resourceOwnerName, true, CheckResponse.EXISTS, true);
}
/**
* Create a new {@link MockHttpResource} that starts {@code dirty}, but always succeeds.
*
* @param resourceOwnerName The user-recognizable name
* @param dirty The starting dirtiness of the resource.
*/
public MockHttpResource(final String resourceOwnerName, final boolean dirty) {
this(resourceOwnerName, dirty, CheckResponse.EXISTS, true);
}
/**
* Create a new {@link MockHttpResource} that starts dirty, but always succeeds.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
* @param parameters The base parameters to specify for the request.
*/
public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters) {
this(resourceOwnerName, masterTimeout, parameters, true, CheckResponse.EXISTS, true);
}
/**
* Create a new {@link MockHttpResource} that starts {@code dirty}.
*
* @param resourceOwnerName The user-recognizable name
* @param dirty The starting dirtiness of the resource.
* @param check The expected response when checking for the resource.
* @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
*/
public MockHttpResource(final String resourceOwnerName, final boolean dirty, final CheckResponse check, final boolean publish) {
this(resourceOwnerName, null, Collections.emptyMap(), dirty, check, publish);
}
/**
* Create a new {@link MockHttpResource} that starts dirty.
*
* @param resourceOwnerName The user-recognizable name
* @param check The expected response when checking for the resource.
* @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
* @param masterTimeout Master timeout to use with any request.
* @param parameters The base parameters to specify for the request.
*/
public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters,
final CheckResponse check, final boolean publish) {
this(resourceOwnerName, masterTimeout, parameters, true, check, publish);
}
/**
* Create a new {@link MockHttpResource}.
*
* @param resourceOwnerName The user-recognizable name
* @param dirty The starting dirtiness of the resource.
* @param check The expected response when checking for the resource.
* @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
* @param masterTimeout Master timeout to use with any request.
* @param parameters The base parameters to specify for the request.
*/
public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters,
final boolean dirty, final CheckResponse check, final boolean publish) {
super(resourceOwnerName, masterTimeout, parameters, dirty);
this.check = check;
this.publish = publish;
}
@Override
protected CheckResponse doCheck(final RestClient client) {
assert client != null;
++checked;
return check;
}
@Override
protected boolean doPublish(final RestClient client) {
assert client != null;
++published;
return publish;
}
}

View File

@ -0,0 +1,128 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.QueueDispatcher;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.net.BindException;
import java.util.ArrayList;
import java.util.List;
/**
* {@code MockWebServerContainer} wraps a {@link MockWebServer} so that every usage does not have to repeat the same port-selection and startup boilerplate.
*/
public class MockWebServerContainer implements AutoCloseable {
private static Logger logger = Loggers.getLogger(MockWebServerContainer.class);
/**
* The running {@link MockWebServer}.
*/
private final MockWebServer server;
/**
* Create a {@link MockWebServerContainer} that uses a port from [{@code 9250}, {@code 9300}).
*
* @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
*/
public MockWebServerContainer() {
this(9250, 9300);
}
/**
* Create a {@link MockWebServerContainer} that uses a port from [{@code startPort}, {@code 9300}).
* <p>
* This is useful if you need to test with two {@link MockWebServer}s, so you can simply skip the port of the existing one.
*
* @param startPort The first port to try (inclusive).
* @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
*/
public MockWebServerContainer(final int startPort) {
this(startPort, 9300);
}
/**
* Create a {@link MockWebServerContainer} that uses a port from [{@code startPort}, {@code endPort}).
*
* @param startPort The first port to try (inclusive).
* @param endPort The last port to try (exclusive).
* @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
*/
public MockWebServerContainer(final int startPort, final int endPort) {
final List<Integer> failedPorts = new ArrayList<>(0);
final QueueDispatcher dispatcher = new QueueDispatcher();
dispatcher.setFailFast(true);
MockWebServer webServer = null;
for (int port = startPort; port < endPort; ++port) {
try {
webServer = new MockWebServer();
webServer.setDispatcher(dispatcher);
webServer.start(port);
break;
} catch (final BindException e) {
failedPorts.add(port);
webServer = null;
} catch (final IOException e) {
logger.error("unrecoverable failure while trying to start MockWebServer with port [{}]", e, port);
throw new ElasticsearchException(e);
}
}
if (webServer != null) {
this.server = webServer;
if (failedPorts.isEmpty() == false) {
logger.warn("ports [{}] were already in use. using port [{}]", failedPorts, webServer.getPort());
}
} else {
throw new ElasticsearchException("unable to find open port between [" + startPort + "] and [" + endPort + "]");
}
}
/**
* Get the {@link MockWebServer} created by this container.
*
* @return Never {@code null}.
*/
public MockWebServer getWebServer() {
return server;
}
/**
* Get the port used by the running web server.
*
* @return The local port used by the {@linkplain #getWebServer() web server}.
*/
public int getPort() {
return server.getPort();
}
/**
* Get the formatted address in the form of "hostname:port".
*
* @return Never {@code null}.
*/
public String getFormattedAddress() {
return server.getHostName() + ":" + server.getPort();
}
/**
* Shutdown the {@linkplain #getWebServer() web server}.
*/
@Override
public void close() throws Exception {
server.shutdown();
}
}
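A possible usage pattern for the container, assuming only the constructor and accessors defined above; the enqueued response is an arbitrary example.
// Sketch only: start a server on the first free port in [9250, 9300),
// point a test at it, and let try-with-resources shut it down.
try (MockWebServerContainer container = new MockWebServerContainer()) {
    MockWebServer webServer = container.getWebServer();
    webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{}"));
    String hostAndPort = container.getFormattedAddress(); // e.g. "localhost:9250"
    // ... point the exporter under test at hostAndPort and run assertions ...
}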

View File

@ -0,0 +1,99 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
/**
* Tests {@link MultiHttpResource}.
*/
public class MultiHttpResourceTests extends ESTestCase {
private final String owner = getClass().getSimpleName();
private final RestClient client = mock(RestClient.class);
public void testDoCheckAndPublish() {
final List<MockHttpResource> allResources = successfulResources();
final MultiHttpResource multiResource = new MultiHttpResource(owner, allResources);
assertTrue(multiResource.doCheckAndPublish(client));
for (final MockHttpResource resource : allResources) {
assertSuccessfulResource(resource);
}
}
public void testDoCheckAndPublishShortCircuits() {
// fail either the check or the publish
final CheckResponse check = randomFrom(CheckResponse.ERROR, CheckResponse.DOES_NOT_EXIST);
final boolean publish = check == CheckResponse.ERROR;
final List<MockHttpResource> allResources = successfulResources();
final MockHttpResource failureResource = new MockHttpResource(owner, true, check, publish);
allResources.add(failureResource);
Collections.shuffle(allResources, random());
final MultiHttpResource multiResource = new MultiHttpResource(owner, allResources);
assertFalse(multiResource.doCheckAndPublish(client));
boolean found = false;
for (final MockHttpResource resource : allResources) {
// should stop looking at this point
if (resource == failureResource) {
assertThat(resource.checked, equalTo(1));
if (resource.check == CheckResponse.ERROR) {
assertThat(resource.published, equalTo(0));
} else {
assertThat(resource.published, equalTo(1));
}
found = true;
} else if (found) {
assertThat(resource.checked, equalTo(0));
assertThat(resource.published, equalTo(0));
}
else {
assertSuccessfulResource(resource);
}
}
}
private List<MockHttpResource> successfulResources() {
final int successful = randomIntBetween(2, 5);
final List<MockHttpResource> resources = new ArrayList<>(successful);
for (int i = 0; i < successful; ++i) {
final CheckResponse check = randomFrom(CheckResponse.DOES_NOT_EXIST, CheckResponse.EXISTS);
final MockHttpResource resource = new MockHttpResource(owner, randomBoolean(), check, check == CheckResponse.DOES_NOT_EXIST);
resources.add(resource);
}
return resources;
}
private void assertSuccessfulResource(final MockHttpResource resource) {
assertThat(resource.checked, equalTo(1));
if (resource.check == CheckResponse.DOES_NOT_EXIST) {
assertThat(resource.published, equalTo(1));
} else {
assertThat(resource.published, equalTo(0));
}
}
}

View File

@ -0,0 +1,78 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.apache.lucene.util.SetOnce.AlreadySetException;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
/**
* Tests {@link NodeFailureListener}.
*/
public class NodeFailureListenerTests extends ESTestCase {
private final Sniffer sniffer = mock(Sniffer.class);
private final HttpResource resource = new MockHttpResource(getTestName(), false);
private final HttpHost host = new HttpHost("localhost", 9200);
private final NodeFailureListener listener = new NodeFailureListener();
public void testSetSnifferTwiceFails() {
listener.setSniffer(sniffer);
assertThat(listener.getSniffer(), is(sniffer));
expectThrows(AlreadySetException.class, () -> listener.setSniffer(randomFrom(sniffer, null)));
}
public void testSetResourceTwiceFails() {
listener.setResource(resource);
assertThat(listener.getResource(), is(resource));
expectThrows(AlreadySetException.class, () -> listener.setResource(randomFrom(resource, null)));
}
public void testSnifferNotifiedOnFailure() {
listener.setSniffer(sniffer);
listener.onFailure(host);
verify(sniffer).sniffOnFailure(host);
}
public void testResourceNotifiedOnFailure() {
listener.setResource(resource);
listener.onFailure(host);
assertTrue(resource.isDirty());
}
public void testResourceAndSnifferNotifiedOnFailure() {
final HttpResource optionalResource = randomFrom(resource, null);
final Sniffer optionalSniffer = randomFrom(sniffer, null);
listener.setResource(optionalResource);
listener.setSniffer(optionalSniffer);
listener.onFailure(host);
if (optionalResource != null) {
assertTrue(resource.isDirty());
}
if (optionalSniffer != null) {
verify(sniffer).sniffOnFailure(host);
}
}
}
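In plain terms, the listener fans a node failure out to both collaborators. A hedged sketch of the wiring, using only the setters and the onFailure contract verified above:
// Sketch only: wire the listener once, then a failed node both marks the
// resources dirty (forcing re-publication) and triggers a new sniff round.
NodeFailureListener listener = new NodeFailureListener();
HttpResource resource = new MockHttpResource("sketch-owner", false); // starts clean
Sniffer sniffer = mock(Sniffer.class);
listener.setResource(resource); // each setter may only be called once
listener.setSniffer(sniffer);
listener.onFailure(new HttpHost("localhost", 9200));
// now resource.isDirty() is true and sniffer.sniffOnFailure(host) has been invoked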

View File

@ -0,0 +1,72 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import java.io.IOException;
import java.io.InputStream;
import java.util.function.Supplier;
import static org.hamcrest.Matchers.is;
/**
* Tests {@link PipelineHttpResource}.
*/
public class PipelineHttpResourceTests extends AbstractPublishableHttpResourceTestCase {
private final String pipelineName = ".my_pipeline";
private final byte[] pipelineBytes = new byte[] { randomByte(), randomByte(), randomByte() };
private final Supplier<byte[]> pipeline = () -> pipelineBytes;
private final PipelineHttpResource resource = new PipelineHttpResource(owner, masterTimeout, pipelineName, pipeline);
public void testPipelineToHttpEntity() throws IOException {
final HttpEntity entity = resource.pipelineToHttpEntity();
assertThat(entity.getContentType().getValue(), is(ContentType.APPLICATION_JSON.toString()));
final InputStream byteStream = entity.getContent();
assertThat(byteStream.available(), is(pipelineBytes.length));
for (final byte pipelineByte : pipelineBytes) {
assertThat(pipelineByte, is((byte)byteStream.read()));
}
assertThat(byteStream.available(), is(0));
}
public void testDoCheckTrue() throws IOException {
assertCheckExists(resource, "/_ingest/pipeline", pipelineName);
}
public void testDoCheckFalse() throws IOException {
assertCheckDoesNotExist(resource, "/_ingest/pipeline", pipelineName);
}
public void testDoCheckNullWithException() throws IOException {
assertCheckWithException(resource, "/_ingest/pipeline", pipelineName);
}
public void testDoPublishTrue() throws IOException {
assertPublishSucceeds(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
}
public void testDoPublishFalse() throws IOException {
assertPublishFails(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
}
public void testDoPublishFalseWithException() throws IOException {
assertPublishWithException(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
}
public void testParameters() {
assertParameters(resource);
}
}

View File

@ -0,0 +1,189 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;
import org.mockito.ArgumentCaptor;
import java.io.IOException;
import java.util.function.Supplier;
import static org.hamcrest.Matchers.is;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
/**
* Tests {@link PublishableHttpResource}.
*/
public class PublishableHttpResourceTests extends AbstractPublishableHttpResourceTestCase {
private final String ownerType = "ownerthing";
private final String resourceBasePath = "/_fake";
private final String resourceName = ".my_thing";
private final String resourceType = "thingamajig";
private final Logger logger = mock(Logger.class);
private final HttpEntity entity = mock(HttpEntity.class);
private final Supplier<HttpEntity> body = () -> entity;
private final PublishableHttpResource resource =
new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);
public void testCheckForResourceExists() throws IOException {
assertCheckForResource(successfulCheckStatus(), CheckResponse.EXISTS, "{} [{}] found on the [{}] {}");
}
public void testCheckForResourceDoesNotExist() throws IOException {
assertCheckForResource(notFoundCheckStatus(), CheckResponse.DOES_NOT_EXIST, "{} [{}] does not exist on the [{}] {}");
}
public void testCheckForResourceUnexpectedResponse() throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final RestStatus failedStatus = failedCheckStatus();
final Response response = response("GET", endpoint, failedStatus);
when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);
assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
is(CheckResponse.ERROR));
verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
verify(client).performRequest("GET", endpoint, resource.getParameters());
verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), any(ResponseException.class));
verifyNoMoreInteractions(client, logger);
}
public void testCheckForResourceErrors() throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final RestStatus failedStatus = failedCheckStatus();
final ResponseException responseException = responseException("GET", endpoint, failedStatus);
final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
when(client.performRequest("GET", endpoint, resource.getParameters())).thenThrow(e);
assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
is(CheckResponse.ERROR));
verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
verify(client).performRequest("GET", endpoint, resource.getParameters());
verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), eq(e));
verifyNoMoreInteractions(client, logger);
}
public void testPutResourceTrue() throws IOException {
assertPutResource(successfulPublishStatus(), true);
}
public void testPutResourceFalse() throws IOException {
assertPutResource(failedPublishStatus(), false);
}
public void testPutResourceFalseWithException() throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"));
when(client.performRequest("PUT", endpoint, resource.getParameters(), entity)).thenThrow(e);
assertThat(resource.putResource(client, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType), is(false));
verify(logger).trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, owner, ownerType);
verify(client).performRequest("PUT", endpoint, resource.getParameters(), entity);
verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), eq(e));
verifyNoMoreInteractions(client, logger);
}
public void testParameters() {
assertParameters(resource);
}
public void testDoCheckAndPublishIgnoresPublishWhenCheckErrors() {
final PublishableHttpResource resource =
new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS, CheckResponse.ERROR, true);
assertThat(resource.doCheckAndPublish(client), is(false));
}
public void testDoCheckAndPublish() {
// not an error (the third state)
final PublishableHttpResource.CheckResponse exists = randomBoolean() ? CheckResponse.EXISTS : CheckResponse.DOES_NOT_EXIST;
final boolean publish = randomBoolean();
final PublishableHttpResource resource =
new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS, exists, publish);
assertThat(resource.doCheckAndPublish(client), is(exists == CheckResponse.EXISTS || publish));
}
private void assertCheckForResource(final RestStatus status, final CheckResponse expected, final String debugLogMessage)
throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Response response = response("GET", endpoint, status);
when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);
assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
is(expected));
verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
verify(client).performRequest("GET", endpoint, resource.getParameters());
if (expected == CheckResponse.EXISTS) {
verify(response).getStatusLine();
} else {
// 3 times because it also is used in the exception message
verify(response, times(3)).getStatusLine();
verify(response, times(2)).getRequestLine();
verify(response).getHost();
verify(response).getEntity();
}
verify(logger).debug(debugLogMessage, resourceType, resourceName, owner, ownerType);
verifyNoMoreInteractions(client, response, logger);
}
private void assertPutResource(final RestStatus status, final boolean expected) throws IOException {
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
final Response response = response("PUT", endpoint, status);
when(client.performRequest("PUT", endpoint, resource.getParameters(), entity)).thenReturn(response);
assertThat(resource.putResource(client, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType),
is(expected));
verify(client).performRequest("PUT", endpoint, resource.getParameters(), entity);
verify(response).getStatusLine();
verify(logger).trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, owner, ownerType);
if (expected) {
verify(logger).debug("{} [{}] uploaded to the [{}] {}", resourceType, resourceName, owner, ownerType);
} else {
ArgumentCaptor<RuntimeException> e = ArgumentCaptor.forClass(RuntimeException.class);
verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), e.capture());
assertThat(e.getValue().getMessage(),
is("[" + resourceBasePath + "/" + resourceName + "] responded with [" + status.getStatus() + "]"));
}
verifyNoMoreInteractions(client, response, logger, entity);
}
}

View File

@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.elasticsearch.test.ESTestCase;
import java.util.Locale;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
/**
* Tests {@link Scheme}.
*/
public class SchemeTests extends ESTestCase {
public void testToString() {
for (final Scheme scheme : Scheme.values()) {
assertThat(scheme.toString(), equalTo(scheme.name().toLowerCase(Locale.ROOT)));
}
}
public void testFromString() {
for (final Scheme scheme : Scheme.values()) {
assertThat(Scheme.fromString(scheme.name()), sameInstance(scheme));
assertThat(Scheme.fromString(scheme.name().toLowerCase(Locale.ROOT)), sameInstance(scheme));
}
}
public void testFromStringMalformed() {
assertIllegalScheme("htp");
assertIllegalScheme("htttp");
assertIllegalScheme("httpd");
assertIllegalScheme("ftp");
assertIllegalScheme("ws");
assertIllegalScheme("wss");
assertIllegalScheme("gopher");
}
private void assertIllegalScheme(final String scheme) {
try {
Scheme.fromString(scheme);
fail("scheme should be unknown: [" + scheme + "]");
} catch (final IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("[" + scheme + "]"));
}
}
}

View File

@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.test.ESTestCase;
import static org.mockito.Mockito.mock;
/**
* Tests {@link SecurityHttpClientConfigCallback}.
*/
public class SecurityHttpClientConfigCallbackTests extends ESTestCase {
private final CredentialsProvider credentialsProvider = mock(CredentialsProvider.class);
private final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
/**
* HttpAsyncClientBuilder's methods are {@code final} and therefore not verifiable.
*/
private final HttpAsyncClientBuilder builder = mock(HttpAsyncClientBuilder.class);
public void testSSLIOSessionStrategyNullThrowsException() {
final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);
expectThrows(NullPointerException.class, () -> new SecurityHttpClientConfigCallback(null, optionalCredentialsProvider));
}
public void testCustomizeHttpClient() {
final SecurityHttpClientConfigCallback callback = new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider);
assertSame(credentialsProvider, callback.getCredentialsProvider());
assertSame(sslStrategy, callback.getSSLStrategy());
assertSame(builder, callback.customizeHttpClient(builder));
}
public void testCustomizeHttpClientWithOptionalParameters() {
final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);
final SecurityHttpClientConfigCallback callback =
new SecurityHttpClientConfigCallback(sslStrategy, optionalCredentialsProvider);
assertSame(builder, callback.customizeHttpClient(builder));
assertSame(optionalCredentialsProvider, callback.getCredentialsProvider());
assertSame(sslStrategy, callback.getSSLStrategy());
}
}
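The intended destination for this callback is the low-level REST client builder. The following is only a sketch under the assumption that SecurityHttpClientConfigCallback is registered via RestClientBuilder#setHttpClientConfigCallback; the host and the mocked collaborators are placeholders.
// Hedged sketch: apply SSL and credentials to every request the client makes.
SecurityHttpClientConfigCallback security =
    new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider);
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)) // placeholder host
    .setHttpClientConfigCallback(security)
    .build();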

View File

@ -0,0 +1,74 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import java.io.IOException;
import java.io.InputStream;
import java.util.function.Supplier;
import static org.hamcrest.Matchers.is;
/**
* Tests {@link TemplateHttpResource}.
*/
public class TemplateHttpResourceTests extends AbstractPublishableHttpResourceTestCase {
private final String templateName = ".my_template";
private final String templateValue = "{\"template\":\".xyz-*\",\"mappings\":{}}";
private final Supplier<String> template = () -> templateValue;
private final TemplateHttpResource resource = new TemplateHttpResource(owner, masterTimeout, templateName, template);
public void testTemplateToHttpEntity() throws IOException {

final byte[] templateValueBytes = templateValue.getBytes(ContentType.APPLICATION_JSON.getCharset());
final HttpEntity entity = resource.templateToHttpEntity();
assertThat(entity.getContentType().getValue(), is(ContentType.APPLICATION_JSON.toString()));
final InputStream byteStream = entity.getContent();
assertThat(byteStream.available(), is(templateValueBytes.length));
for (final byte templateByte : templateValueBytes) {
assertThat(templateByte, is((byte)byteStream.read()));
}
assertThat(byteStream.available(), is(0));
}
public void testDoCheckTrue() throws IOException {
assertCheckExists(resource, "/_template", templateName);
}
public void testDoCheckFalse() throws IOException {
assertCheckDoesNotExist(resource, "/_template", templateName);
}
public void testDoCheckNullWithException() throws IOException {
assertCheckWithException(resource, "/_template", templateName);
}
public void testDoPublishTrue() throws IOException {
assertPublishSucceeds(resource, "/_template", templateName, StringEntity.class);
}
public void testDoPublishFalse() throws IOException {
assertPublishFails(resource, "/_template", templateName, StringEntity.class);
}
public void testDoPublishFalseWithException() throws IOException {
assertPublishWithException(resource, "/_template", templateName, StringEntity.class);
}
public void testParameters() {
assertParameters(resource);
}
}

View File

@ -0,0 +1,70 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.config.RequestConfig;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Tests {@link TimeoutRequestConfigCallback}.
*/
public class TimeoutRequestConfigCallbackTests extends ESTestCase {
private final TimeValue connectTimeout = mock(TimeValue.class);
private final int connectTimeoutMillis = randomInt();
private final TimeValue socketTimeout = mock(TimeValue.class);
private final int socketTimeoutMillis = randomInt();
private final RequestConfig.Builder builder = mock(RequestConfig.Builder.class);
@Before
public void configureTimeouts() {
when(connectTimeout.millis()).thenReturn((long)connectTimeoutMillis);
when(socketTimeout.millis()).thenReturn((long)socketTimeoutMillis);
}
public void testCustomizeRequestConfig() {
final TimeoutRequestConfigCallback callback = new TimeoutRequestConfigCallback(connectTimeout, socketTimeout);
assertSame(builder, callback.customizeRequestConfig(builder));
verify(builder).setConnectTimeout(connectTimeoutMillis);
verify(builder).setSocketTimeout(socketTimeoutMillis);
}
public void testCustomizeRequestConfigWithOptionalParameters() {
final TimeValue optionalConnectTimeout = randomFrom(connectTimeout, null);
// avoid making both null at the same time
final TimeValue optionalSocketTimeout = optionalConnectTimeout != null ? randomFrom(socketTimeout, null) : socketTimeout;
final TimeoutRequestConfigCallback callback = new TimeoutRequestConfigCallback(optionalConnectTimeout, optionalSocketTimeout);
assertSame(builder, callback.customizeRequestConfig(builder));
assertSame(optionalConnectTimeout, callback.getConnectTimeout());
assertSame(optionalSocketTimeout, callback.getSocketTimeout());
if (optionalConnectTimeout != null) {
verify(builder).setConnectTimeout(connectTimeoutMillis);
} else {
verify(builder, never()).setConnectTimeout(anyInt());
}
if (optionalSocketTimeout != null) {
verify(builder).setSocketTimeout(socketTimeoutMillis);
} else {
verify(builder, never()).setSocketTimeout(anyInt());
}
}
}
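As with the other callbacks, this one is presumably handed to the REST client builder so the exporter's timeouts apply to every request. A sketch under the assumption that TimeoutRequestConfigCallback is registered via RestClientBuilder#setRequestConfigCallback; the timeout values and host are arbitrary examples.
// Hedged sketch: connect and socket timeouts applied to all requests.
TimeoutRequestConfigCallback timeouts =
    new TimeoutRequestConfigCallback(TimeValue.timeValueSeconds(5), TimeValue.timeValueSeconds(60));
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)) // placeholder host
    .setRequestConfigCallback(timeouts)
    .build();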

View File

@ -0,0 +1,99 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
* Tests {@link VersionHttpResource}.
*/
public class VersionHttpResourceTests extends ESTestCase {
private final String owner = getClass().getSimpleName();
private final RestClient client = mock(RestClient.class);
public void testDoCheckAndPublishSuccess() throws IOException {
final Version minimumVersion =
randomFrom(Version.V_2_0_0, Version.V_2_0_0_beta1, Version.V_2_0_0_rc1, Version.V_2_3_3, Version.CURRENT);
final Version version = randomFrom(minimumVersion, Version.CURRENT);
final Response response = responseForVersion(version);
final VersionHttpResource resource = new VersionHttpResource(owner, minimumVersion);
assertTrue(resource.doCheckAndPublish(client));
verify(response).getEntity();
}
public void testDoCheckAndPublishFailedParsing() throws IOException {
// malformed JSON
final Response response = responseForJSON("{");
final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);
assertFalse(resource.doCheckAndPublish(client));
verify(response).getEntity();
}
public void testDoCheckAndPublishFailedFieldMissing() throws IOException {
// malformed response; guards against a future format change or someone breaking filter_path
final Response response = responseForJSON("{\"version.number\":\"" + Version.CURRENT + "\"}");
final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);
assertFalse(resource.doCheckAndPublish(client));
verify(response).getEntity();
}
public void testDoCheckAndPublishFailedFieldWrongType() throws IOException {
// malformed response (should be {version: { number : ... }})
final Response response = responseForJSON("{\"version\":\"" + Version.CURRENT + "\"}");
final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);
assertFalse(resource.doCheckAndPublish(client));
verify(response).getEntity();
}
public void testDoCheckAndPublishFailedWithIOException() throws IOException {
// request fails for some reason
when(client.performRequest("GET", "/", VersionHttpResource.PARAMETERS)).thenThrow(new IOException("expected"));
final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);
assertFalse(resource.doCheckAndPublish(client));
}
private Response responseForJSON(final String json) throws IOException {
final StringEntity entity = new StringEntity(json, ContentType.APPLICATION_JSON);
final Response response = mock(Response.class);
when(response.getEntity()).thenReturn(entity);
when(client.performRequest("GET", "/", VersionHttpResource.PARAMETERS)).thenReturn(response);
return response;
}
private Response responseForVersion(final Version version) throws IOException {
return responseForJSON("{\"version\":{\"number\":\"" + version + "\"}}");
}
}
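
The resource under test performs a GET / that is narrowed by a filter_path parameter and then compares the reported version against a required minimum. A minimal sketch of that check, assuming the PARAMETERS constant holds the filter_path and that parsing goes through XContentHelper (the real VersionHttpResource may be structured differently):

package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.xcontent.XContentHelper;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Map;
class VersionCheckSketch {
    // assumed equivalent of VersionHttpResource.PARAMETERS
    static final Map<String, String> PARAMETERS = Collections.singletonMap("filter_path", "version.number");
    static boolean checkVersion(RestClient client, Version minimumVersion) {
        try {
            Response response = client.performRequest("GET", "/", PARAMETERS);
            HttpEntity entity = response.getEntity();
            String json = Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8));
            Map<String, Object> root = XContentHelper.convertToMap(new BytesArray(json), false).v2();
            Object version = root.get("version");
            if (version instanceof Map == false) {
                // covers the "field missing" and "wrong type" failures exercised above
                return false;
            }
            Object number = ((Map<?, ?>) version).get("number");
            return number instanceof String && Version.fromString((String) number).onOrAfter(minimumVersion);
        } catch (IOException | ElasticsearchParseException e) {
            // covers the "request fails" and "malformed JSON" cases exercised above
            return false;
        }
    }
}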

View File

@ -7,7 +7,7 @@ package org.elasticsearch.xpack.monitoring.security;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestStatus;
@ -15,7 +15,7 @@ import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.elasticsearch.xpack.security.InternalClient;
import java.util.ArrayList;
import java.util.stream.Collectors;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.is;
@ -86,7 +86,7 @@ public class MonitoringInternalClientTests extends MonitoringIntegTestCase {
* @return the source of a random monitoring template
*/
private String randomTemplateSource() {
return randomFrom(new ArrayList<>(monitoringTemplates().values()));
return randomFrom(monitoringTemplates().stream().map(Tuple::v2).collect(Collectors.toList()));
}
}

View File

@ -1,43 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.support;
import org.elasticsearch.Version;
import org.elasticsearch.test.ESTestCase;
import java.nio.charset.StandardCharsets;
import java.util.List;
import static org.hamcrest.Matchers.equalTo;
public class VersionUtilsTests extends ESTestCase {
public void testParseVersion() {
List<Version> versions = randomSubsetOf(9, Version.V_2_0_0_beta1, Version.V_2_0_0_beta2, Version.V_2_0_0_rc1, Version.V_2_0_0,
Version.V_2_0_1, Version.V_2_0_2, Version.V_2_1_0, Version.V_2_1_1, Version.V_2_1_2, Version.V_2_2_0, Version.V_2_3_0,
Version.V_5_0_0_alpha1);
for (Version version : versions) {
String output = createOutput(VersionUtils.VERSION_NUMBER_FIELD, version.toString());
assertThat(VersionUtils.parseVersion(output.getBytes(StandardCharsets.UTF_8)), equalTo(version));
assertThat(VersionUtils.parseVersion(VersionUtils.VERSION_NUMBER_FIELD, output), equalTo(version));
}
}
private String createOutput(String fieldName, String value) {
return "{\n" +
" \"name\" : \"Blind Faith\",\n" +
" \"cluster_name\" : \"elasticsearch\",\n" +
" \"version\" : {\n" +
" \"" + fieldName + "\" : \"" + value + "\",\n" +
" \"build_hash\" : \"4092d253dddda0ff1ff3d1c09ac7678e757843f9\",\n" +
" \"build_timestamp\" : \"2015-10-13T08:53:10Z\",\n" +
" \"build_snapshot\" : true,\n" +
" \"lucene_version\" : \"5.2.1\"\n" +
" },\n" +
" \"tagline\" : \"You Know, for Search\"\n" +
"}\n";
}
}

View File

@ -10,6 +10,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.regex.Regex;
@ -54,6 +55,7 @@ import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@ -170,7 +172,7 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
@Override
protected Set<String> excludeTemplates() {
return monitoringTemplates().keySet();
return monitoringTemplateNames();
}
@Before
@ -278,9 +280,17 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
}
}
protected Map<String, String> monitoringTemplates() {
protected List<Tuple<String, String>> monitoringTemplates() {
return StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
.collect(Collectors.toMap(MonitoringIndexNameResolver::templateName, MonitoringIndexNameResolver::template, (a, b) -> a));
.map((resolver) -> new Tuple<>(resolver.templateName(), resolver.template()))
.distinct()
.collect(Collectors.toList());
}
protected Set<String> monitoringTemplateNames() {
return StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
.map(MonitoringIndexNameResolver::templateName)
.collect(Collectors.toSet());
}
protected void assertTemplateInstalled(String name) {
@ -303,7 +313,7 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
}
protected void waitForMonitoringTemplates() throws Exception {
assertBusy(() -> monitoringTemplates().keySet().forEach(this::assertTemplateInstalled), 30, TimeUnit.SECONDS);
assertBusy(() -> monitoringTemplateNames().forEach(this::assertTemplateInstalled), 30, TimeUnit.SECONDS);
}
protected void waitForMonitoringIndices() throws Exception {

View File

@ -170,7 +170,7 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
this.licensing = new Licensing(settings);
this.security = new Security(settings, env, licenseState, sslService);
this.monitoring = new Monitoring(settings, env, licenseState);
this.monitoring = new Monitoring(settings, licenseState);
this.watcher = new Watcher(settings);
this.graph = new Graph(settings);
// Check if the node is a transport client.

View File

@ -5,6 +5,8 @@
*/
package org.elasticsearch.xpack.ssl;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
@ -13,6 +15,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.xpack.XPackSettings;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
@ -92,13 +95,63 @@ public class SSLService extends AbstractComponent {
};
}
/**
* Create a new {@link SSLIOSessionStrategy} based on the provided settings. The settings are used to identify the SSL configuration
* that should be used to create the context.
*
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a context created from the default configuration
* @return Never {@code null}.
*/
public SSLIOSessionStrategy sslIOSessionStrategy(Settings settings) {
SSLConfiguration config = sslConfiguration(settings);
SSLContext sslContext = sslContext(config);
String[] ciphers = supportedCiphers(sslParameters(sslContext).getCipherSuites(), config.cipherSuites(), false);
String[] supportedProtocols = config.supportedProtocols().toArray(Strings.EMPTY_ARRAY);
HostnameVerifier verifier;
if (config.verificationMode().isHostnameVerificationEnabled()) {
verifier = SSLIOSessionStrategy.getDefaultHostnameVerifier();
} else {
verifier = NoopHostnameVerifier.INSTANCE;
}
return sslIOSessionStrategy(sslContext, supportedProtocols, ciphers, verifier);
}
/**
* The {@link SSLParameters} that are associated with the {@code sslContext}.
* <p>
* This method exists to simplify testing since {@link SSLContext#getSupportedSSLParameters()} is {@code final}.
*
* @param sslContext The SSL context for the current SSL settings
* @return Never {@code null}.
*/
SSLParameters sslParameters(SSLContext sslContext) {
return sslContext.getSupportedSSLParameters();
}
/**
* This method only exists to simplify testing of {@link #sslIOSessionStrategy(Settings)} because {@link SSLIOSessionStrategy} does
* not expose any of the parameters that you give it.
*
* @param sslContext SSL Context used to handle SSL / TCP requests
* @param protocols Supported protocols
* @param ciphers Supported ciphers
* @param verifier Hostname verifier
* @return Never {@code null}.
*/
SSLIOSessionStrategy sslIOSessionStrategy(SSLContext sslContext, String[] protocols, String[] ciphers, HostnameVerifier verifier) {
return new SSLIOSessionStrategy(sslContext, protocols, ciphers, verifier);
}
/**
* Create a new {@link SSLSocketFactory} based on the provided settings. The settings are used to identify the ssl configuration that
* should be used to create the socket factory. The socket factory will also properly configure the ciphers and protocols on each
* socket that is created.
*
* @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return
* a factory created from the default configuration
* @return {@link SSLSocketFactory}
* @return Never {@code null}.
*/
public SSLSocketFactory sslSocketFactory(Settings settings) {
SSLConfiguration sslConfiguration = sslConfiguration(settings);
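
The new sslIOSessionStrategy(Settings) method above is what lets the HTTP exporter hand an exporter-scoped TLS configuration to the low-level REST client. A hypothetical wiring sketch follows; the host, the settings prefix, and the helper class are illustrative assumptions, not the exporter code in this PR.

import org.apache.http.HttpHost;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.ssl.SSLService;
class HttpExporterSslSketch {
    // exporterSettings are assumed to be the settings scoped under "xpack.monitoring.exporters._http."
    static RestClient buildClient(SSLService sslService, Settings exporterSettings) {
        Settings sslSettings = exporterSettings.getAsSettings("ssl");
        SSLIOSessionStrategy strategy = sslService.sslIOSessionStrategy(sslSettings);
        return RestClient.builder(new HttpHost("monitoring.example.org", 9200, "https"))
                .setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setSSLStrategy(strategy))
                .build();
    }
}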

View File

@ -5,9 +5,17 @@
*/
package org.elasticsearch.xpack.ssl;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
@ -15,10 +23,14 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.test.junit.annotations.Network;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.XPackSettings;
import org.mockito.ArgumentCaptor;
import org.junit.Before;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import java.nio.file.Path;
@ -30,11 +42,16 @@ import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class SSLServiceTests extends ESTestCase {
@ -283,6 +300,43 @@ public class SSLServiceTests extends ESTestCase {
}
}
public void testSSLStrategy() {
// this just exhaustively verifies that the right things are called and that it uses the right parameters
Settings settings = Settings.builder().build();
SSLService sslService = mock(SSLService.class);
SSLConfiguration sslConfig = mock(SSLConfiguration.class);
SSLParameters sslParameters = mock(SSLParameters.class);
SSLContext sslContext = mock(SSLContext.class);
String[] protocols = new String[] { "protocols" };
String[] ciphers = new String[] { "ciphers!!!" };
String[] supportedCiphers = new String[] { "supported ciphers" };
List<String> requestedCiphers = new ArrayList<>(0);
VerificationMode mode = randomFrom(VerificationMode.values());
ArgumentCaptor<HostnameVerifier> verifier = ArgumentCaptor.forClass(HostnameVerifier.class);
SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
when(sslService.sslConfiguration(settings)).thenReturn(sslConfig);
when(sslService.sslContext(sslConfig)).thenReturn(sslContext);
when(sslService.supportedCiphers(supportedCiphers, requestedCiphers, false)).thenReturn(ciphers);
when(sslService.sslParameters(sslContext)).thenReturn(sslParameters);
when(sslParameters.getCipherSuites()).thenReturn(supportedCiphers);
when(sslConfig.supportedProtocols()).thenReturn(Arrays.asList(protocols));
when(sslConfig.cipherSuites()).thenReturn(requestedCiphers);
when(sslConfig.verificationMode()).thenReturn(mode);
when(sslService.sslIOSessionStrategy(eq(sslContext), eq(protocols), eq(ciphers), verifier.capture())).thenReturn(sslStrategy);
// ensure it actually goes through and calls the real method
when(sslService.sslIOSessionStrategy(settings)).thenCallRealMethod();
assertThat(sslService.sslIOSessionStrategy(settings), sameInstance(sslStrategy));
if (mode.isHostnameVerificationEnabled()) {
assertThat(verifier.getValue(), instanceOf(DefaultHostnameVerifier.class));
} else {
assertThat(verifier.getValue(), sameInstance(NoopHostnameVerifier.INSTANCE));
}
}
@Network
public void testThatSSLContextWithoutSettingsWorks() throws Exception {
SSLService sslService = new SSLService(Settings.EMPTY, env);
@ -291,7 +345,7 @@ public class SSLServiceTests extends ESTestCase {
// Execute a GET on a site known to have a valid certificate signed by a trusted public CA
// This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default
// truststore trusts all common public CAs so the handshake will succeed
client.execute(new HttpGet("https://www.elastic.co/"));
client.execute(new HttpGet("https://www.elastic.co/")).close();
}
}
@ -308,4 +362,55 @@ public class SSLServiceTests extends ESTestCase {
client.execute(new HttpGet("https://www.elastic.co/")).close();
}
}
@Network
public void testThatSSLIOSessionStrategyWithoutSettingsWorks() throws Exception {
SSLService sslService = new SSLService(Settings.EMPTY, env);
SSLIOSessionStrategy sslStrategy = sslService.sslIOSessionStrategy(Settings.EMPTY);
try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().setSSLStrategy(sslStrategy).build()) {
client.start();
// Execute a GET on a site known to have a valid certificate signed by a trusted public CA
// This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default
// truststore trusts all common public CAs so the handshake will succeed
client.execute(new HttpHost("elastic.co", 80, "https"), new HttpGet("/"), new AssertionCallback());
}
}
@Network
public void testThatSSLIOSessionStrategyTrustsJDKTrustedCAs() throws Exception {
Settings settings = Settings.builder()
.put("xpack.ssl.keystore.path", testclientStore)
.put("xpack.ssl.keystore.password", "testclient")
.build();
SSLIOSessionStrategy sslStrategy = new SSLService(settings, env).sslIOSessionStrategy(Settings.EMPTY);
try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().setSSLStrategy(sslStrategy).build()) {
client.start();
// Execute a GET on a site known to have a valid certificate signed by a trusted public CA which will succeed because the JDK
// certs are trusted by default
client.execute(new HttpHost("elastic.co", 80, "https"), new HttpGet("/"), new AssertionCallback());
}
}
class AssertionCallback implements FutureCallback<HttpResponse> {
@Override
public void completed(HttpResponse result) {
assertThat(result.getStatusLine().getStatusCode(), lessThan(300));
}
@Override
public void failed(Exception ex) {
logger.error("request failed", ex);
fail(ex.toString());
}
@Override
public void cancelled() {
fail("The request was cancelled for some reason");
}
}
}