Merge pull request elastic/elasticsearch#3 from elastic/master

update from master

Original commit: elastic/x-pack-elasticsearch@2da5400562
Author: Konrad Beiske
Date: 2015-08-27 10:18:24 +02:00
Commit: 28af1e275a
267 changed files with 8960 additions and 3837 deletions

View File

@ -4,7 +4,7 @@
<import file="${elasticsearch.integ.antfile.default}"/>
<target name="start-external-cluster-with-plugin" depends="setup-workspace">
<install-plugin name="elasticsearch-license" file="${integ.deps}/elasticsearch-license-${project.version}.zip" />
<install-plugin name="license" file="${integ.deps}/license-${project.version}.zip" />
<install-plugin name="${project.artifactId}" file="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip"/>
<startup-elasticsearch/>
</target>

View File

@ -5,14 +5,14 @@
<modelVersion>4.0.0</modelVersion>
<artifactId>elasticsearch-marvel</artifactId>
<name>Elasticsearch X-Plugins - Marvel</name>
<artifactId>marvel</artifactId>
<name>X-Plugins: Marvel</name>
<description>Elasticsearch Marvel</description>
<parent>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>x-plugins</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
</parent>
<properties>
@ -28,7 +28,7 @@
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
@ -66,7 +66,7 @@
<artifactItems combine.children="append">
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${project.version}</version>
<type>zip</type>
<overWrite>true</overWrite>

View File

@ -1,35 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.marvel.agent.AgentService;
import org.elasticsearch.marvel.agent.collector.CollectorModule;
import org.elasticsearch.marvel.agent.exporter.ExporterModule;
import org.elasticsearch.marvel.agent.renderer.RendererModule;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsModule;
import org.elasticsearch.marvel.license.LicenseModule;
public class MarvelModule extends AbstractModule implements SpawnModules {
@Override
protected void configure() {
bind(AgentService.class).asEagerSingleton();
}
@Override
public Iterable<? extends Module> spawnModules() {
return ImmutableList.of(
new MarvelSettingsModule(),
new LicenseModule(),
new CollectorModule(),
new ExporterModule(),
new RendererModule());
}
}

View File

@ -7,19 +7,31 @@ package org.elasticsearch.marvel;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.AgentService;
import org.elasticsearch.marvel.agent.collector.CollectorModule;
import org.elasticsearch.marvel.agent.exporter.ExporterModule;
import org.elasticsearch.marvel.agent.exporter.HttpESExporter;
import org.elasticsearch.marvel.agent.renderer.RendererModule;
import org.elasticsearch.marvel.agent.settings.MarvelModule;
import org.elasticsearch.marvel.license.LicenseModule;
import org.elasticsearch.marvel.agent.settings.MarvelSetting;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.plugins.AbstractPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tribe.TribeService;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
public class MarvelPlugin extends AbstractPlugin {
public class MarvelPlugin extends Plugin {
private static final ESLogger logger = Loggers.getLogger(MarvelPlugin.class);
@ -47,15 +59,20 @@ public class MarvelPlugin extends AbstractPlugin {
}
@Override
public Collection<Class<? extends Module>> modules() {
public Collection<Module> nodeModules() {
if (!enabled) {
return ImmutableList.of();
return Collections.emptyList();
}
return ImmutableList.<Class<? extends Module>>of(MarvelModule.class);
return Arrays.<Module>asList(
new MarvelModule(),
new LicenseModule(),
new CollectorModule(),
new ExporterModule(),
new RendererModule());
}
@Override
public Collection<Class<? extends LifecycleComponent>> services() {
public Collection<Class<? extends LifecycleComponent>> nodeServices() {
if (!enabled) {
return ImmutableList.of();
}
@ -75,4 +92,18 @@ public class MarvelPlugin extends AbstractPlugin {
}
return settings.getAsBoolean(ENABLED, true);
}
public void onModule(ClusterModule module) {
// HttpESExporter
module.registerClusterDynamicSetting(HttpESExporter.SETTINGS_HOSTS, Validator.EMPTY);
module.registerClusterDynamicSetting(HttpESExporter.SETTINGS_HOSTS + ".*", Validator.EMPTY);
module.registerClusterDynamicSetting(HttpESExporter.SETTINGS_TIMEOUT, Validator.EMPTY);
module.registerClusterDynamicSetting(HttpESExporter.SETTINGS_READ_TIMEOUT, Validator.EMPTY);
module.registerClusterDynamicSetting(HttpESExporter.SETTINGS_SSL_HOSTNAME_VERIFICATION, Validator.EMPTY);
// MarvelSettingsService
for (MarvelSetting setting : MarvelSettings.dynamicSettings()) {
module.registerClusterDynamicSetting(setting.dynamicSettingName(), setting.dynamicValidator());
}
}
}

View File

@ -1,204 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.license.plugin.LicenseVersion;
import java.io.IOException;
import java.io.Serializable;
public class MarvelVersion implements Serializable {
// The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
// AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
// the (internal) format of the id is there so we can easily do after/before checks on the id
public static final int V_2_0_0_Beta1_ID = /*00*/2000001;
public static final MarvelVersion V_2_0_0_Beta1 = new MarvelVersion(V_2_0_0_Beta1_ID, true, Version.V_2_0_0_beta1, LicenseVersion.V_1_0_0);
public static final MarvelVersion CURRENT = V_2_0_0_Beta1;
public static MarvelVersion readVersion(StreamInput in) throws IOException {
return fromId(in.readVInt());
}
public static MarvelVersion fromId(int id) {
switch (id) {
case V_2_0_0_Beta1_ID:
return V_2_0_0_Beta1;
default:
return new MarvelVersion(id, null, Version.CURRENT, LicenseVersion.CURRENT);
}
}
public static void writeVersion(MarvelVersion version, StreamOutput out) throws IOException {
out.writeVInt(version.id);
}
/**
* Returns the smallest version between the 2.
*/
public static MarvelVersion smallest(MarvelVersion version1, MarvelVersion version2) {
return version1.id < version2.id ? version1 : version2;
}
/**
* Returns the version given its string representation, current version if the argument is null or empty
*/
public static MarvelVersion fromString(String version) {
if (!Strings.hasLength(version)) {
return MarvelVersion.CURRENT;
}
String[] parts = version.split("[\\.-]");
if (parts.length < 3 || parts.length > 4) {
throw new IllegalArgumentException("the version needs to contain major, minor and revision, and optionally the build");
}
try {
//we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
int major = Integer.parseInt(parts[0]) * 1000000;
int minor = Integer.parseInt(parts[1]) * 10000;
int revision = Integer.parseInt(parts[2]) * 100;
int build = 99;
if (parts.length == 4) {
String buildStr = parts[3];
if (buildStr.startsWith("beta")) {
build = Integer.parseInt(buildStr.substring(4));
}
if (buildStr.startsWith("rc")) {
build = Integer.parseInt(buildStr.substring(2)) + 50;
}
}
return fromId(major + minor + revision + build);
} catch(NumberFormatException e) {
throw new IllegalArgumentException("unable to parse version " + version, e);
}
}
public final int id;
public final byte major;
public final byte minor;
public final byte revision;
public final byte build;
public final Boolean snapshot;
public final Version minEsCompatibilityVersion;
public final LicenseVersion minLicenseCompatibilityVersion;
MarvelVersion(int id, @Nullable Boolean snapshot, Version minEsCompatibilityVersion, LicenseVersion minLicenseCompatibilityVersion) {
this.id = id;
this.major = (byte) ((id / 1000000) % 100);
this.minor = (byte) ((id / 10000) % 100);
this.revision = (byte) ((id / 100) % 100);
this.build = (byte) (id % 100);
this.snapshot = snapshot;
this.minEsCompatibilityVersion = minEsCompatibilityVersion;
this.minLicenseCompatibilityVersion = minLicenseCompatibilityVersion;
}
public boolean snapshot() {
return snapshot != null && snapshot;
}
public boolean after(MarvelVersion version) {
return version.id < id;
}
public boolean onOrAfter(MarvelVersion version) {
return version.id <= id;
}
public boolean before(MarvelVersion version) {
return version.id > id;
}
public boolean onOrBefore(MarvelVersion version) {
return version.id >= id;
}
public boolean compatibleWith(MarvelVersion version) {
return version.onOrAfter(minimumCompatibilityVersion());
}
public boolean compatibleWith(Version esVersion) {
return esVersion.onOrAfter(minEsCompatibilityVersion);
}
/**
* Returns the minimum compatible version based on the current
* version. I.e. a node needs to be on at least the returned version in order
* to communicate with a node running the current version. In most cases the returned
* version is the smallest release of the same major version, unless the current version
* is a beta or RC release, in which case the version itself is returned.
*/
public MarvelVersion minimumCompatibilityVersion() {
return MarvelVersion.smallest(this, fromId(major * 1000000 + 99));
}
/**
* @return The minimum elasticsearch version this marvel version is compatible with.
*/
public Version minimumEsCompatiblityVersion() {
return minEsCompatibilityVersion;
}
/**
* @return The minimum license plugin version this marvel version is compatible with.
*/
public LicenseVersion minimumLicenseCompatibilityVersion() {
return minLicenseCompatibilityVersion;
}
/**
* Just the version number (without -SNAPSHOT if snapshot).
*/
public String number() {
StringBuilder sb = new StringBuilder();
sb.append(major).append('.').append(minor).append('.').append(revision);
if (build < 50) {
sb.append("-beta").append(build);
} else if (build < 99) {
sb.append("-rc").append(build - 50);
}
return sb.toString();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(number());
if (snapshot()) {
sb.append("-SNAPSHOT");
}
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MarvelVersion that = (MarvelVersion) o;
if (id != that.id) return false;
return true;
}
@Override
public int hashCode() {
return id;
}
}
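
For readers tracing the removed version logic above, here is a minimal, self-contained sketch (not part of the commit) of the XXYYZZAA id encoding described in the comment near the top of this file; the constant and the decode arithmetic are copied from the class, while the wrapper class and main method are only for illustration.

public class MarvelVersionIdExample {
    public static void main(String[] args) {
        int id = 2000001;                               // V_2_0_0_Beta1_ID from the class above
        int major = (id / 1000000) % 100;               // 2
        int minor = (id / 10000) % 100;                 // 0
        int revision = (id / 100) % 100;                // 0
        int build = id % 100;                           // 1 -> below 50, so a beta build
        String number = major + "." + minor + "." + revision
                + (build < 50 ? "-beta" + build : build < 99 ? "-rc" + (build - 50) : "");
        System.out.println(number);                     // prints "2.0.0-beta1"
    }
}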

View File

@ -5,68 +5,66 @@
*/
package org.elasticsearch.marvel.agent;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.marvel.agent.collector.Collector;
import org.elasticsearch.marvel.agent.collector.licenses.LicensesCollector;
import org.elasticsearch.marvel.agent.exporter.Exporter;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Collection;
import java.util.Set;
import java.util.*;
public class AgentService extends AbstractLifecycleComponent<AgentService> implements NodeSettingsService.Listener {
private static final String SETTINGS_BASE = "marvel.agent.";
public static final String SETTINGS_INTERVAL = SETTINGS_BASE + "interval";
public static final String SETTINGS_ENABLED = SETTINGS_BASE + "enabled";
public static final String SETTINGS_STATS_TIMEOUT = SETTINGS_BASE + "stats.timeout";
private volatile ExportingWorker exportingWorker;
private volatile Thread workerThread;
private volatile long samplingInterval;
private volatile TimeValue clusterStatsTimeout;
private final MarvelSettings marvelSettings;
private final Collection<Collector> collectors;
private final Collection<Exporter> exporters;
@Inject
public AgentService(Settings settings, NodeSettingsService nodeSettingsService,
@ClusterDynamicSettings DynamicSettings dynamicSettings,
LicenseService licenseService,
LicenseService licenseService, MarvelSettings marvelSettings,
Set<Collector> collectors, Set<Exporter> exporters) {
super(settings);
this.samplingInterval = settings.getAsTime(SETTINGS_INTERVAL, TimeValue.timeValueSeconds(10)).millis();
this.marvelSettings = marvelSettings;
this.samplingInterval = marvelSettings.interval().millis();
TimeValue statsTimeout = settings.getAsTime(SETTINGS_STATS_TIMEOUT, TimeValue.timeValueMinutes(10));
if (settings.getAsBoolean(SETTINGS_ENABLED, true)) {
this.collectors = ImmutableSet.copyOf(collectors);
this.exporters = ImmutableSet.copyOf(exporters);
} else {
this.collectors = ImmutableSet.of();
this.exporters = ImmutableSet.of();
logger.info("collecting disabled by settings");
}
this.collectors = Collections.unmodifiableSet(filterCollectors(collectors, marvelSettings.collectors()));
this.exporters = Collections.unmodifiableSet(exporters);
nodeSettingsService.addListener(this);
dynamicSettings.addDynamicSetting(SETTINGS_INTERVAL);
dynamicSettings.addDynamicSetting(SETTINGS_STATS_TIMEOUT);
logger.trace("marvel is running in [{}] mode", licenseService.mode());
}
protected Set<Collector> filterCollectors(Set<Collector> collectors, String[] filters) {
if (CollectionUtils.isEmpty(filters)) {
return collectors;
}
Set<Collector> list = new HashSet<>();
for (Collector collector : collectors) {
if (Regex.simpleMatch(filters, collector.name().toLowerCase(Locale.ROOT))) {
list.add(collector);
} else if (collector instanceof LicensesCollector) {
list.add(collector);
}
}
return list;
}
protected void applyIntervalSettings() {
if (samplingInterval <= 0) {
logger.info("data sampling is disabled due to interval settings [{}]", samplingInterval);
@ -148,7 +146,7 @@ public class AgentService extends AbstractLifecycleComponent<AgentService> imple
@Override
public void onRefreshSettings(Settings settings) {
TimeValue newSamplingInterval = settings.getAsTime(SETTINGS_INTERVAL, null);
TimeValue newSamplingInterval = settings.getAsTime(MarvelSettings.INTERVAL, null);
if (newSamplingInterval != null && newSamplingInterval.millis() != samplingInterval) {
logger.info("sampling interval updated to [{}]", newSamplingInterval);
samplingInterval = newSamplingInterval.millis();
@ -162,10 +160,14 @@ public class AgentService extends AbstractLifecycleComponent<AgentService> imple
@Override
public void run() {
boolean firstRun = true;
while (!closed) {
// sleep first to allow the node to complete initialization before collecting for the first time
try {
Thread.sleep(samplingInterval);
long interval = (firstRun && (marvelSettings.startUpDelay() != null)) ? marvelSettings.startUpDelay().millis() : samplingInterval;
Thread.sleep(interval);
if (closed) {
continue;
}
@ -179,11 +181,18 @@ public class AgentService extends AbstractLifecycleComponent<AgentService> imple
exporter.export(results);
}
}
if (closed) {
// Stop collecting if the worker is marked as closed
break;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Throwable t) {
logger.error("Background thread had an uncaught exception:", t);
} finally {
firstRun = false;
}
}
logger.debug("worker shutdown");
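
As a side note on the new filterCollectors method above, the following small sketch (not from the commit; the filter value and the first two collector names are hypothetical, only "licenses-collector" is the real NAME constant shown later in this change) illustrates how the marvel.agent.collectors wildcard filters behave: collectors whose names match are kept, and the licenses collector is kept unconditionally (the real code checks instanceof LicensesCollector rather than the name).

import org.elasticsearch.common.regex.Regex;

public class CollectorFilterExample {
    public static void main(String[] args) {
        String[] filters = { "index*" };                                  // hypothetical marvel.agent.collectors value
        String[] names = { "index-stats", "cluster-stats", "licenses-collector" };
        for (String name : names) {
            boolean kept = Regex.simpleMatch(filters, name) || "licenses-collector".equals(name);
            System.out.println(name + " -> " + (kept ? "collected" : "filtered out"));
        }
    }
}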

View File

@ -7,13 +7,13 @@ package org.elasticsearch.marvel.agent.collector;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
@ -22,17 +22,16 @@ public abstract class AbstractCollector<T> extends AbstractLifecycleComponent<T>
private final String name;
protected final ClusterService clusterService;
protected final ClusterName clusterName;
protected final MarvelSettingsService marvelSettings;
protected final MarvelSettings marvelSettings;
protected final LicenseService licenseService;
@Inject
public AbstractCollector(Settings settings, String name, ClusterService clusterService,
ClusterName clusterName, MarvelSettingsService marvelSettings) {
public AbstractCollector(Settings settings, String name, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService) {
super(settings);
this.name = name;
this.clusterService = clusterService;
this.clusterName = clusterName;
this.marvelSettings = marvelSettings;
this.licenseService = licenseService;
}
@Override
@ -51,22 +50,23 @@ public abstract class AbstractCollector<T> extends AbstractLifecycleComponent<T>
}
/**
* Indicates if the current collector should
* be executed on master node only.
* Indicates if the current collector is allowed to collect data
*/
protected boolean masterOnly() {
return false;
protected boolean canCollect() {
return licenseService.enabled() || licenseService.inExpirationGracePeriod();
}
protected boolean isLocalNodeMaster() {
return clusterService.state().nodes().localNodeMaster();
}
@Override
public Collection<MarvelDoc> collect() {
if (masterOnly() && !clusterService.state().nodes().localNodeMaster()) {
logger.trace("collector [{}] runs on master only", name());
return null;
}
try {
return doCollect();
if (canCollect()) {
return doCollect();
}
logger.trace("collector [{}] can not collect data", name());
} catch (ElasticsearchTimeoutException e) {
logger.error("collector [{}] timed out when collecting data");
} catch (Exception e) {
@ -96,4 +96,8 @@ public abstract class AbstractCollector<T> extends AbstractLifecycleComponent<T>
@Override
protected void doClose() {
}
protected String clusterUUID() {
return clusterService.state().metaData().clusterUUID();
}
}

View File

@ -11,6 +11,7 @@ import org.elasticsearch.marvel.agent.collector.cluster.ClusterStateCollector;
import org.elasticsearch.marvel.agent.collector.cluster.ClusterStatsCollector;
import org.elasticsearch.marvel.agent.collector.indices.IndexRecoveryCollector;
import org.elasticsearch.marvel.agent.collector.indices.IndexStatsCollector;
import org.elasticsearch.marvel.agent.collector.licenses.LicensesCollector;
import org.elasticsearch.marvel.agent.collector.node.NodeStatsCollector;
import java.util.HashSet;
@ -22,6 +23,7 @@ public class CollectorModule extends AbstractModule {
public CollectorModule() {
// Registers default collectors
registerCollector(LicensesCollector.class);
registerCollector(IndexStatsCollector.class);
registerCollector(ClusterStatsCollector.class);
registerCollector(ClusterStateCollector.class);

View File

@ -7,16 +7,15 @@ package org.elasticsearch.marvel.agent.collector.cluster;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
@ -34,15 +33,15 @@ public class ClusterStateCollector extends AbstractCollector<ClusterStateCollect
private final Client client;
@Inject
public ClusterStateCollector(Settings settings, ClusterService clusterService,
ClusterName clusterName, MarvelSettingsService marvelSettings, Client client) {
super(settings, NAME, clusterService, clusterName, marvelSettings);
public ClusterStateCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
Client client) {
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.client = client;
}
@Override
protected boolean masterOnly() {
return true;
protected boolean canCollect() {
return super.canCollect() && isLocalNodeMaster();
}
@Override
@ -52,11 +51,7 @@ public class ClusterStateCollector extends AbstractCollector<ClusterStateCollect
ClusterState clusterState = clusterService.state();
ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().get(marvelSettings.clusterStateTimeout());
results.add(buildMarvelDoc(clusterName.value(), TYPE, System.currentTimeMillis(), clusterState, clusterHealth.getStatus()));
results.add(new ClusterStateMarvelDoc(clusterUUID(), TYPE, System.currentTimeMillis(), clusterState, clusterHealth.getStatus()));
return results.build();
}
protected MarvelDoc buildMarvelDoc(String clusterName, String type, long timestamp, ClusterState clusterState, ClusterHealthStatus status) {
return ClusterStateMarvelDoc.createMarvelDoc(clusterName, type, timestamp, clusterState, status);
}
}

View File

@ -9,40 +9,22 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
public class ClusterStateMarvelDoc extends MarvelDoc<ClusterStateMarvelDoc.Payload> {
public class ClusterStateMarvelDoc extends MarvelDoc {
private final Payload payload;
private final ClusterState clusterState;
private final ClusterHealthStatus status;
public ClusterStateMarvelDoc(String clusterName, String type, long timestamp, Payload payload) {
super(clusterName, type, timestamp);
this.payload = payload;
public ClusterStateMarvelDoc(String clusterUUID, String type, long timestamp, ClusterState clusterState, ClusterHealthStatus status) {
super(clusterUUID, type, timestamp);
this.clusterState = clusterState;
this.status = status;
}
@Override
public ClusterStateMarvelDoc.Payload payload() {
return payload;
public ClusterState getClusterState() {
return clusterState;
}
public static ClusterStateMarvelDoc createMarvelDoc(String clusterName, String type, long timestamp, ClusterState clusterState, ClusterHealthStatus status) {
return new ClusterStateMarvelDoc(clusterName, type, timestamp, new Payload(clusterState, status));
}
public static class Payload {
private final ClusterState clusterState;
private final ClusterHealthStatus status;
Payload(ClusterState clusterState, ClusterHealthStatus status) {
this.clusterState = clusterState;
this.status = status;
}
public ClusterState getClusterState() {
return clusterState;
}
public ClusterHealthStatus getStatus() {
return status;
}
public ClusterHealthStatus getStatus() {
return status;
}
}

View File

@ -8,13 +8,13 @@ package org.elasticsearch.marvel.agent.collector.cluster;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
@ -32,15 +32,15 @@ public class ClusterStatsCollector extends AbstractCollector<ClusterStatsCollect
private final Client client;
@Inject
public ClusterStatsCollector(Settings settings, ClusterService clusterService,
ClusterName clusterName, MarvelSettingsService marvelSettings, Client client) {
super(settings, NAME, clusterService, clusterName, marvelSettings);
public ClusterStatsCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
Client client) {
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.client = client;
}
@Override
protected boolean masterOnly() {
return true;
protected boolean canCollect() {
return super.canCollect() && isLocalNodeMaster();
}
@Override
@ -48,11 +48,7 @@ public class ClusterStatsCollector extends AbstractCollector<ClusterStatsCollect
ImmutableList.Builder<MarvelDoc> results = ImmutableList.builder();
ClusterStatsResponse clusterStatsResponse = client.admin().cluster().prepareClusterStats().get(marvelSettings.clusterStatsTimeout());
results.add(buildMarvelDoc(clusterName.value(), TYPE, System.currentTimeMillis(), clusterStatsResponse));
results.add(new ClusterStatsMarvelDoc(clusterUUID(), TYPE, System.currentTimeMillis(), clusterStatsResponse));
return results.build();
}
protected MarvelDoc buildMarvelDoc(String clusterName, String type, long timestamp, ClusterStatsResponse clusterStatsResponse) {
return ClusterStatsMarvelDoc.createMarvelDoc(clusterName, type, timestamp, clusterStatsResponse);
}
}

View File

@ -8,34 +8,16 @@ package org.elasticsearch.marvel.agent.collector.cluster;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
public class ClusterStatsMarvelDoc extends MarvelDoc<ClusterStatsMarvelDoc.Payload> {
public class ClusterStatsMarvelDoc extends MarvelDoc {
private final Payload payload;
private final ClusterStatsResponse clusterStats;
public ClusterStatsMarvelDoc(String clusterName, String type, long timestamp, Payload payload) {
super(clusterName, type, timestamp);
this.payload = payload;
public ClusterStatsMarvelDoc(String clusterUUID, String type, long timestamp, ClusterStatsResponse clusterStats) {
super(clusterUUID, type, timestamp);
this.clusterStats = clusterStats;
}
@Override
public ClusterStatsMarvelDoc.Payload payload() {
return payload;
}
public static ClusterStatsMarvelDoc createMarvelDoc(String clusterName, String type, long timestamp, ClusterStatsResponse clusterStats) {
return new ClusterStatsMarvelDoc(clusterName, type, timestamp, new Payload(clusterStats));
}
public static class Payload {
private final ClusterStatsResponse clusterStats;
Payload(ClusterStatsResponse clusterStats) {
this.clusterStats = clusterStats;
}
public ClusterStatsResponse getClusterStats() {
return clusterStats;
}
public ClusterStatsResponse getClusterStats() {
return clusterStats;
}
}

View File

@ -7,14 +7,15 @@ package org.elasticsearch.marvel.agent.collector.indices;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
@ -32,15 +33,15 @@ public class IndexRecoveryCollector extends AbstractCollector<IndexRecoveryColle
private final Client client;
@Inject
public IndexRecoveryCollector(Settings settings, ClusterService clusterService,
ClusterName clusterName, MarvelSettingsService marvelSettings, Client client) {
super(settings, NAME, clusterService, clusterName, marvelSettings);
public IndexRecoveryCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
Client client) {
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.client = client;
}
@Override
protected boolean masterOnly() {
return true;
protected boolean canCollect() {
return super.canCollect() && isLocalNodeMaster();
}
@Override
@ -48,16 +49,14 @@ public class IndexRecoveryCollector extends AbstractCollector<IndexRecoveryColle
ImmutableList.Builder<MarvelDoc> results = ImmutableList.builder();
RecoveryResponse recoveryResponse = client.admin().indices().prepareRecoveries()
.setIndices(marvelSettings.indices())
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.setActiveOnly(marvelSettings.recoveryActiveOnly())
.get(marvelSettings.recoveryTimeout());
if (recoveryResponse.hasRecoveries()) {
results.add(buildMarvelDoc(clusterName.value(), TYPE, System.currentTimeMillis(), recoveryResponse));
results.add(new IndexRecoveryMarvelDoc(clusterUUID(), TYPE, System.currentTimeMillis(), recoveryResponse));
}
return results.build();
}
protected MarvelDoc buildMarvelDoc(String clusterName, String type, long timestamp, RecoveryResponse recoveryResponse) {
return IndexRecoveryMarvelDoc.createMarvelDoc(clusterName, type, timestamp, recoveryResponse);
}
}

View File

@ -8,36 +8,16 @@ package org.elasticsearch.marvel.agent.collector.indices;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
public class IndexRecoveryMarvelDoc extends MarvelDoc<IndexRecoveryMarvelDoc.Payload> {
public class IndexRecoveryMarvelDoc extends MarvelDoc {
private final Payload payload;
private final RecoveryResponse recoveryResponse;
public IndexRecoveryMarvelDoc(String clusterName, String type, long timestamp, Payload payload) {
super(clusterName, type, timestamp);
this.payload = payload;
public IndexRecoveryMarvelDoc(String clusterUUID, String type, long timestamp, RecoveryResponse recoveryResponse) {
super(clusterUUID, type, timestamp);
this.recoveryResponse = recoveryResponse;
}
@Override
public IndexRecoveryMarvelDoc.Payload payload() {
return payload;
}
public static IndexRecoveryMarvelDoc createMarvelDoc(String clusterName, String type, long timestamp,
RecoveryResponse recoveryResponse) {
return new IndexRecoveryMarvelDoc(clusterName, type, timestamp, new Payload(recoveryResponse));
}
public static class Payload {
RecoveryResponse recoveryResponse;
public Payload(RecoveryResponse recoveryResponse) {
this.recoveryResponse = recoveryResponse;
}
public RecoveryResponse getRecoveryResponse() {
return recoveryResponse;
}
public RecoveryResponse getRecoveryResponse() {
return recoveryResponse;
}
}

View File

@ -8,14 +8,15 @@ package org.elasticsearch.marvel.agent.collector.indices;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
@ -33,15 +34,15 @@ public class IndexStatsCollector extends AbstractCollector<IndexStatsCollector>
private final Client client;
@Inject
public IndexStatsCollector(Settings settings, ClusterService clusterService,
ClusterName clusterName, MarvelSettingsService marvelSettings, Client client) {
super(settings, NAME, clusterService, clusterName, marvelSettings);
public IndexStatsCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
Client client) {
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.client = client;
}
@Override
protected boolean masterOnly() {
return true;
protected boolean canCollect() {
return super.canCollect() && isLocalNodeMaster();
}
@Override
@ -49,20 +50,16 @@ public class IndexStatsCollector extends AbstractCollector<IndexStatsCollector>
ImmutableList.Builder<MarvelDoc> results = ImmutableList.builder();
IndicesStatsResponse indicesStats = client.admin().indices().prepareStats()
.setStore(true)
.setIndexing(true)
.setDocs(true)
.setRefresh(true)
.setIndices(marvelSettings.indices())
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.get(marvelSettings.indexStatsTimeout());
long timestamp = System.currentTimeMillis();
String clusterUUID = clusterUUID();
for (IndexStats indexStats : indicesStats.getIndices().values()) {
results.add(buildMarvelDoc(clusterName.value(), TYPE, timestamp, indexStats));
results.add(new IndexStatsMarvelDoc(clusterUUID, TYPE, timestamp, indexStats));
}
return results.build();
}
protected MarvelDoc buildMarvelDoc(String clusterName, String type, long timestamp, IndexStats indexStats) {
return IndexStatsMarvelDoc.createMarvelDoc(clusterName, type, timestamp, indexStats);
}
}

View File

@ -8,34 +8,16 @@ package org.elasticsearch.marvel.agent.collector.indices;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
public class IndexStatsMarvelDoc extends MarvelDoc<IndexStatsMarvelDoc.Payload> {
public class IndexStatsMarvelDoc extends MarvelDoc {
private final Payload payload;
private final IndexStats indexStats;
public IndexStatsMarvelDoc(String clusterName, String type, long timestamp, Payload payload) {
super(clusterName, type, timestamp);
this.payload = payload;
public IndexStatsMarvelDoc(String clusterUUID, String type, long timestamp, IndexStats indexStats) {
super(clusterUUID, type, timestamp);
this.indexStats = indexStats;
}
@Override
public IndexStatsMarvelDoc.Payload payload() {
return payload;
}
public static IndexStatsMarvelDoc createMarvelDoc(String clusterName, String type, long timestamp, IndexStats indexStats) {
return new IndexStatsMarvelDoc(clusterName, type, timestamp, new Payload(indexStats));
}
public static class Payload {
private final IndexStats indexStats;
Payload(IndexStats indexStats) {
this.indexStats = indexStats;
}
public IndexStats getIndexStats() {
return indexStats;
}
public IndexStats getIndexStats() {
return indexStats;
}
}

View File

@ -0,0 +1,64 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.collector.licenses;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.core.License;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import java.util.Collection;
import java.util.List;
/**
* Collector for registered licenses.
* <p/>
* This collector runs on the master node and collects data about all
* known licenses that are currently registered. Each license is
* collected as a {@link LicensesMarvelDoc} document.
*/
public class LicensesCollector extends AbstractCollector<LicensesMarvelDoc> {
public static final String NAME = "licenses-collector";
public static final String TYPE = "cluster_licenses";
private final ClusterName clusterName;
private final LicenseService licenseService;
@Inject
public LicensesCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
ClusterName clusterName) {
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.clusterName = clusterName;
this.licenseService = licenseService;
}
@Override
protected boolean canCollect() {
// This collector can always collect data on the master node
return isLocalNodeMaster();
}
@Override
protected Collection<MarvelDoc> doCollect() throws Exception {
ImmutableList.Builder<MarvelDoc> results = ImmutableList.builder();
List<License> licenses = licenseService.licenses();
if (licenses != null) {
String clusterUUID = clusterUUID();
results.add(new LicensesMarvelDoc(MarvelSettings.MARVEL_DATA_INDEX_NAME, TYPE, clusterUUID, clusterUUID, System.currentTimeMillis(),
clusterName.value(), Version.CURRENT.toString(), licenses));
}
return results.build();
}
}
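
A brief usage-style sketch (illustrative only, not part of the commit; the helper name is invented) of how the identity of the document built above surfaces through the MarvelDoc accessors introduced later in this change:

// Assumes the MarvelDoc API shown further down in this commit (index(), type(), id()).
static void printLicensesDocIdentity(java.util.Collection<MarvelDoc> docs) {
    for (MarvelDoc doc : docs) {
        // index() is MarvelSettings.MARVEL_DATA_INDEX_NAME, type() is "cluster_licenses",
        // and id() is the cluster UUID, so each collection cycle overwrites the same
        // per-cluster licenses document instead of appending a new one.
        System.out.println(doc.index() + "/" + doc.type() + "/" + doc.id());
    }
}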

View File

@ -0,0 +1,39 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.collector.licenses;
import org.elasticsearch.license.core.License;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import java.util.List;
public class LicensesMarvelDoc extends MarvelDoc {
private final String clusterName;
private final String version;
private final List<License> licenses;
LicensesMarvelDoc(String index, String type, String id, String clusterUUID, long timestamp,
String clusterName, String version, List<License> licenses) {
super(index, type, id, clusterUUID, timestamp);
this.clusterName = clusterName;
this.version = version;
this.licenses = licenses;
}
public String getClusterName() {
return clusterName;
}
public String getVersion() {
return version;
}
public List<License> getLicenses() {
return licenses;
}
}

View File

@ -8,8 +8,7 @@ package org.elasticsearch.marvel.agent.collector.node;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.inject.ConfigurationException;
@ -20,7 +19,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.marvel.agent.collector.AbstractCollector;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.node.service.NodeService;
import java.util.Collection;
@ -44,11 +44,10 @@ public class NodeStatsCollector extends AbstractCollector<NodeStatsCollector> {
private final Provider<DiskThresholdDecider> diskThresholdDeciderProvider;
@Inject
public NodeStatsCollector(Settings settings, ClusterService clusterService, ClusterName clusterName,
MarvelSettingsService marvelSettings,
public NodeStatsCollector(Settings settings, ClusterService clusterService, MarvelSettings marvelSettings, LicenseService licenseService,
NodeService nodeService, DiscoveryService discoveryService,
Provider<DiskThresholdDecider> diskThresholdDeciderProvider) {
super(settings, NAME, clusterService, clusterName, marvelSettings);
super(settings, NAME, clusterService, marvelSettings, licenseService);
this.nodeService = nodeService;
this.discoveryService = discoveryService;
this.diskThresholdDeciderProvider = diskThresholdDeciderProvider;
@ -72,16 +71,10 @@ public class NodeStatsCollector extends AbstractCollector<NodeStatsCollector> {
Double diskThresholdWatermarkHigh = (diskThresholdDecider != null) ? 100.0 - diskThresholdDecider.getFreeDiskThresholdHigh() : -1;
boolean diskThresholdDeciderEnabled = (diskThresholdDecider != null) && diskThresholdDecider.isEnabled();
results.add(buildMarvelDoc(clusterName.value(), TYPE, System.currentTimeMillis(),
discoveryService.localNode().id(), clusterService.state().nodes().localNodeMaster(), nodeStats,
Bootstrap.isMemoryLocked(), diskThresholdWatermarkHigh, diskThresholdDeciderEnabled));
results.add(new NodeStatsMarvelDoc(clusterUUID(), TYPE, System.currentTimeMillis(),
discoveryService.localNode().id(), isLocalNodeMaster(), nodeStats,
BootstrapInfo.isMemoryLocked(), diskThresholdWatermarkHigh, diskThresholdDeciderEnabled));
return results.build();
}
protected MarvelDoc buildMarvelDoc(String clusterName, String type, long timestamp,
String nodeId, boolean nodeMaster, NodeStats nodeStats,
boolean mlockall, Double diskThresholdWaterMarkHigh, boolean diskThresholdDeciderEnabled) {
return NodeStatsMarvelDoc.createMarvelDoc(clusterName, type, timestamp, nodeId, nodeMaster, nodeStats, mlockall, diskThresholdWaterMarkHigh, diskThresholdDeciderEnabled);
}
}

View File

@ -8,68 +8,49 @@ package org.elasticsearch.marvel.agent.collector.node;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
public class NodeStatsMarvelDoc extends MarvelDoc<NodeStatsMarvelDoc.Payload> {
public class NodeStatsMarvelDoc extends MarvelDoc {
private final Payload payload;
private final String nodeId;
private final boolean nodeMaster;
private final NodeStats nodeStats;
public NodeStatsMarvelDoc(String clusterName, String type, long timestamp, Payload payload) {
super(clusterName, type, timestamp);
this.payload = payload;
private final boolean mlockall;
private final Double diskThresholdWaterMarkHigh;
private final boolean diskThresholdDeciderEnabled;
public NodeStatsMarvelDoc(String clusterUUID, String type, long timestamp,
String nodeId, boolean nodeMaster, NodeStats nodeStats, boolean mlockall, Double diskThresholdWaterMarkHigh, boolean diskThresholdDeciderEnabled) {
super(clusterUUID, type, timestamp);
this.nodeId = nodeId;
this.nodeMaster = nodeMaster;
this.nodeStats = nodeStats;
this.mlockall = mlockall;
this.diskThresholdWaterMarkHigh = diskThresholdWaterMarkHigh;
this.diskThresholdDeciderEnabled = diskThresholdDeciderEnabled;
}
@Override
public Payload payload() {
return payload;
public String getNodeId() {
return nodeId;
}
public static NodeStatsMarvelDoc createMarvelDoc(String clusterName, String type, long timestamp,
String nodeId, boolean nodeMaster, NodeStats nodeStats,
boolean mlockall, Double diskThresholdWaterMarkHigh, boolean diskThresholdDeciderEnabled) {
return new NodeStatsMarvelDoc(clusterName, type, timestamp, new Payload(nodeId, nodeMaster, nodeStats, mlockall, diskThresholdWaterMarkHigh, diskThresholdDeciderEnabled));
public boolean isNodeMaster() {
return nodeMaster;
}
public static class Payload {
public NodeStats getNodeStats() {
return nodeStats;
}
private final String nodeId;
private final boolean nodeMaster;
private final NodeStats nodeStats;
public boolean isMlockall() {
return mlockall;
}
private final boolean mlockall;
private final Double diskThresholdWaterMarkHigh;
private final boolean diskThresholdDeciderEnabled;
public Double getDiskThresholdWaterMarkHigh() {
return diskThresholdWaterMarkHigh;
}
Payload(String nodeId, boolean nodeMaster, NodeStats nodeStats, boolean mlockall, Double diskThresholdWaterMarkHigh, boolean diskThresholdDeciderEnabled) {
this.nodeId = nodeId;
this.nodeMaster = nodeMaster;
this.nodeStats = nodeStats;
this.mlockall = mlockall;
this.diskThresholdWaterMarkHigh = diskThresholdWaterMarkHigh;
this.diskThresholdDeciderEnabled = diskThresholdDeciderEnabled;
}
public String getNodeId() {
return nodeId;
}
public boolean isNodeMaster() {
return nodeMaster;
}
public NodeStats getNodeStats() {
return nodeStats;
}
public boolean isMlockall() {
return mlockall;
}
public Double getDiskThresholdWaterMarkHigh() {
return diskThresholdWaterMarkHigh;
}
public boolean isDiskThresholdDeciderEnabled() {
return diskThresholdDeciderEnabled;
}
public boolean isDiskThresholdDeciderEnabled() {
return diskThresholdDeciderEnabled;
}
}

View File

@ -9,8 +9,6 @@ import com.google.common.io.ByteStreams;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
@ -102,7 +100,6 @@ public class HttpESExporter extends AbstractExporter<HttpESExporter> implements
@Inject
public HttpESExporter(Settings settings, ClusterService clusterService, ClusterName clusterName,
@ClusterDynamicSettings DynamicSettings dynamicSettings,
NodeSettingsService nodeSettingsService,
NodeService nodeService, Environment environment,
RendererRegistry registry) {
@ -132,12 +129,6 @@ public class HttpESExporter extends AbstractExporter<HttpESExporter> implements
bulkTimeout = settings.getAsTime(SETTINGS_CHECK_TEMPLATE_TIMEOUT, null);
keepAliveWorker = new ConnectionKeepAliveWorker();
dynamicSettings.addDynamicSetting(SETTINGS_HOSTS);
dynamicSettings.addDynamicSetting(SETTINGS_HOSTS + ".*");
dynamicSettings.addDynamicSetting(SETTINGS_TIMEOUT);
dynamicSettings.addDynamicSetting(SETTINGS_READ_TIMEOUT);
dynamicSettings.addDynamicSetting(SETTINGS_SSL_HOSTNAME_VERIFICATION);
nodeSettingsService.addListener(this);
if (!settings.getByPrefix(SETTINGS_SSL_PREFIX).getAsMap().isEmpty()) {
@ -204,7 +195,17 @@ public class HttpESExporter extends AbstractExporter<HttpESExporter> implements
// Builds the bulk action metadata line
builder.startObject();
builder.startObject("index").field("_type", marvelDoc.type()).endObject();
builder.startObject("index");
if (marvelDoc.index() != null) {
builder.field("_index", marvelDoc.index());
}
if (marvelDoc.type() != null) {
builder.field("_type", marvelDoc.type());
}
if (marvelDoc.id() != null) {
builder.field("_id", marvelDoc.id());
}
builder.endObject();
builder.endObject();
// Adds action metadata line bulk separator
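
To make the effect of this change concrete: the bulk action metadata line now carries the optional _index and _id whenever the MarvelDoc provides them. Roughly (placeholder values, not taken from the commit):

    {"index":{"_type":"<doc type>"}}                                                              <- timestamped doc, index and id are null
    {"index":{"_index":"<marvel data index>","_type":"cluster_licenses","_id":"<cluster uuid>"}}  <- data doc such as LicensesMarvelDoc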

View File

@ -5,29 +5,44 @@
*/
package org.elasticsearch.marvel.agent.exporter;
public abstract class MarvelDoc<T> {
public abstract class MarvelDoc {
private final String clusterName;
private final String index;
private final String type;
private final String id;
private final String clusterUUID;
private final long timestamp;
public MarvelDoc(String clusterName, String type, long timestamp) {
this.clusterName = clusterName;
public MarvelDoc(String index, String type, String id, String clusterUUID, long timestamp) {
this.index = index;
this.type = type;
this.id = id;
this.clusterUUID = clusterUUID;
this.timestamp = timestamp;
}
public String clusterName() {
return clusterName;
public MarvelDoc(String clusterUUID, String type, long timestamp) {
this(null, type, null, clusterUUID, timestamp);
}
public String clusterUUID() {
return clusterUUID;
}
public String index() {
return index;
}
public String type() {
return type;
}
public String id() {
return id;
}
public long timestamp() {
return timestamp;
}
public abstract T payload();
}
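
As an illustration of the two construction paths (a sketch using subclass constructors shown elsewhere in this commit; the local variables are placeholders, not real code from the change):

// Timestamped doc: no explicit index or id, carries cluster UUID + type + timestamp.
MarvelDoc stats = new ClusterStatsMarvelDoc(clusterUUID, ClusterStatsCollector.TYPE,
        System.currentTimeMillis(), clusterStatsResponse);

// Data doc: pinned to a fixed index and id (the marvel data index and the cluster UUID),
// as done by LicensesCollector above.
MarvelDoc licensesDoc = new LicensesMarvelDoc(MarvelSettings.MARVEL_DATA_INDEX_NAME, LicensesCollector.TYPE,
        clusterUUID, clusterUUID, System.currentTimeMillis(), clusterName, version, licenses);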

View File

@ -26,7 +26,7 @@ import java.util.Set;
public abstract class AbstractRenderer<T extends MarvelDoc> implements Renderer<T> {
private static final String[] DEFAULT_FILTERS = {
Fields.CLUSTER_NAME.underscore().toString(),
Fields.CLUSTER_UUID.underscore().toString(),
Fields.TIMESTAMP.underscore().toString(),
};
@ -54,7 +54,7 @@ public abstract class AbstractRenderer<T extends MarvelDoc> implements Renderer<
builder.startObject();
// Add fields common to all Marvel documents
builder.field(Fields.CLUSTER_NAME, marvelDoc.clusterName());
builder.field(Fields.CLUSTER_UUID, marvelDoc.clusterUUID());
DateTime timestampDateTime = new DateTime(marvelDoc.timestamp(), DateTimeZone.UTC);
builder.field(Fields.TIMESTAMP, timestampDateTime.toString());
@ -77,7 +77,7 @@ public abstract class AbstractRenderer<T extends MarvelDoc> implements Renderer<
}
static final class Fields {
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString CLUSTER_UUID = new XContentBuilderString("cluster_uuid");
static final XContentBuilderString TIMESTAMP = new XContentBuilderString("timestamp");
}
}

View File

@ -11,11 +11,13 @@ import org.elasticsearch.marvel.agent.collector.cluster.ClusterStateCollector;
import org.elasticsearch.marvel.agent.collector.cluster.ClusterStatsCollector;
import org.elasticsearch.marvel.agent.collector.indices.IndexRecoveryCollector;
import org.elasticsearch.marvel.agent.collector.indices.IndexStatsCollector;
import org.elasticsearch.marvel.agent.collector.licenses.LicensesCollector;
import org.elasticsearch.marvel.agent.collector.node.NodeStatsCollector;
import org.elasticsearch.marvel.agent.renderer.cluster.ClusterStateRenderer;
import org.elasticsearch.marvel.agent.renderer.cluster.ClusterStatsRenderer;
import org.elasticsearch.marvel.agent.renderer.indices.IndexRecoveryRenderer;
import org.elasticsearch.marvel.agent.renderer.indices.IndexStatsRenderer;
import org.elasticsearch.marvel.agent.renderer.licenses.LicensesRenderer;
import org.elasticsearch.marvel.agent.renderer.node.NodeStatsRenderer;
import java.util.HashMap;
@ -34,6 +36,9 @@ public class RendererModule extends AbstractModule {
MapBinder<String, Renderer> mbinder = MapBinder.newMapBinder(binder(), String.class, Renderer.class);
// Bind default renderers
bind(LicensesRenderer.class).asEagerSingleton();
mbinder.addBinding(LicensesCollector.TYPE).to(LicensesRenderer.class);
bind(IndexStatsRenderer.class).asEagerSingleton();
mbinder.addBinding(IndexStatsCollector.TYPE).to(IndexStatsRenderer.class);

View File

@ -20,7 +20,7 @@ import java.util.Locale;
public class ClusterStateRenderer extends AbstractRenderer<ClusterStateMarvelDoc> {
private static final String[] FILTERS = {
public static final String[] FILTERS = {
"cluster_state.version",
"cluster_state.master_node",
"cluster_state.status",
@ -36,25 +36,22 @@ public class ClusterStateRenderer extends AbstractRenderer<ClusterStateMarvelDoc
protected void doRender(ClusterStateMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.CLUSTER_STATE);
ClusterStateMarvelDoc.Payload payload = marvelDoc.payload();
if (payload != null) {
ClusterState clusterState = payload.getClusterState();
if (clusterState != null) {
builder.field(Fields.STATUS, payload.getStatus().name().toLowerCase(Locale.ROOT));
ClusterState clusterState = marvelDoc.getClusterState();
if (clusterState != null) {
builder.field(Fields.STATUS, marvelDoc.getStatus().name().toLowerCase(Locale.ROOT));
clusterState.toXContent(builder, params);
clusterState.toXContent(builder, params);
RoutingTable routingTable = clusterState.routingTable();
if (routingTable != null) {
List<ShardRouting> shards = routingTable.allShards();
if (shards != null) {
RoutingTable routingTable = clusterState.routingTable();
if (routingTable != null) {
List<ShardRouting> shards = routingTable.allShards();
if (shards != null) {
builder.startArray(Fields.SHARDS);
for (ShardRouting shard : shards) {
shard.toXContent(builder, params);
}
builder.endArray();
builder.startArray(Fields.SHARDS);
for (ShardRouting shard : shards) {
shard.toXContent(builder, params);
}
builder.endArray();
}
}
}

View File

@ -16,7 +16,7 @@ import java.io.IOException;
public class ClusterStatsRenderer extends AbstractRenderer<ClusterStatsMarvelDoc> {
private static final String[] FILTERS = {
public static final String[] FILTERS = {
"cluster_stats.nodes.count.total",
"cluster_stats.indices.shards.total",
"cluster_stats.indices.shards.index.replication.min",
@ -37,12 +37,9 @@ public class ClusterStatsRenderer extends AbstractRenderer<ClusterStatsMarvelDoc
protected void doRender(ClusterStatsMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.CLUSTER_STATS);
ClusterStatsMarvelDoc.Payload payload = marvelDoc.payload();
if (payload != null) {
ClusterStatsResponse clusterStats = payload.getClusterStats();
if (clusterStats != null) {
clusterStats.toXContent(builder, params);
}
ClusterStatsResponse clusterStats = marvelDoc.getClusterStats();
if (clusterStats != null) {
clusterStats.toXContent(builder, params);
}
builder.endObject();

View File

@ -27,29 +27,27 @@ public class IndexRecoveryRenderer extends AbstractRenderer<IndexRecoveryMarvelD
protected void doRender(IndexRecoveryMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.INDEX_RECOVERY);
IndexRecoveryMarvelDoc.Payload payload = marvelDoc.payload();
if (payload != null) {
RecoveryResponse recovery = payload.getRecoveryResponse();
if (recovery != null) {
builder.startArray(Fields.SHARDS);
Map<String, List<ShardRecoveryResponse>> shards = recovery.shardResponses();
if (shards != null) {
for (Map.Entry<String, List<ShardRecoveryResponse>> shard : shards.entrySet()) {
RecoveryResponse recovery = marvelDoc.getRecoveryResponse();
if (recovery != null) {
builder.startArray(Fields.SHARDS);
Map<String, List<ShardRecoveryResponse>> shards = recovery.shardResponses();
if (shards != null) {
for (Map.Entry<String, List<ShardRecoveryResponse>> shard : shards.entrySet()) {
List<ShardRecoveryResponse> indexShards = shard.getValue();
if (indexShards != null) {
for (ShardRecoveryResponse indexShard : indexShards) {
builder.startObject();
builder.field(Fields.INDEX_NAME, shard.getKey());
indexShard.toXContent(builder, params);
builder.endObject();
}
List<ShardRecoveryResponse> indexShards = shard.getValue();
if (indexShards != null) {
for (ShardRecoveryResponse indexShard : indexShards) {
builder.startObject();
builder.field(Fields.INDEX_NAME, shard.getKey());
indexShard.toXContent(builder, params);
builder.endObject();
}
}
}
builder.endArray();
}
builder.endArray();
}
builder.endObject();
}

View File

@ -16,12 +16,20 @@ import java.io.IOException;
public class IndexStatsRenderer extends AbstractRenderer<IndexStatsMarvelDoc> {
private static final String[] FILTERS = {
public static final String[] FILTERS = {
"index_stats.index",
"index_stats.primaries.docs.count",
"index_stats.total.docs.count",
"index_stats.total.indexing.index_total",
"index_stats.total.indexing.index_time_in_millis",
"index_stats.total.indexing.throttle_time_in_millis",
"index_stats.total.merges.total_size_in_bytes",
"index_stats.total.search.query_total",
"index_stats.total.search.query_time_in_millis",
"index_stats.total.segments.memory_in_bytes",
"index_stats.total.store.size_in_bytes",
"index_stats.total.store.throttle_time_in_millis",
"index_stats.total.indexing.throttle_time_in_millis",
"index_stats.total.refresh.total_time_in_millis",
};
public IndexStatsRenderer() {
@ -32,19 +40,23 @@ public class IndexStatsRenderer extends AbstractRenderer<IndexStatsMarvelDoc> {
protected void doRender(IndexStatsMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.INDEX_STATS);
IndexStatsMarvelDoc.Payload payload = marvelDoc.payload();
if (payload != null) {
IndexStats indexStats = payload.getIndexStats();
if (indexStats != null) {
builder.field(Fields.INDEX, indexStats.getIndex());
IndexStats indexStats = marvelDoc.getIndexStats();
if (indexStats != null) {
builder.field(Fields.INDEX, indexStats.getIndex());
builder.startObject(Fields.TOTAL);
if (indexStats.getTotal() != null) {
indexStats.getTotal().toXContent(builder, params);
}
builder.endObject();
builder.startObject(Fields.TOTAL);
if (indexStats.getTotal() != null) {
indexStats.getTotal().toXContent(builder, params);
}
builder.endObject();
builder.startObject(Fields.PRIMARIES);
if (indexStats.getPrimaries() != null) {
indexStats.getPrimaries().toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
@ -52,5 +64,6 @@ public class IndexStatsRenderer extends AbstractRenderer<IndexStatsMarvelDoc> {
static final XContentBuilderString INDEX_STATS = new XContentBuilderString("index_stats");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString PRIMARIES = new XContentBuilderString("primaries");
}
}
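The FILTERS above are dotted include paths into the exported document. How AbstractRenderer applies them is not shown in this hunk, but the filtering idea can be sketched with Elasticsearch's XContentMapValues utility; the class name and the toy document below are illustrative only.

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

public class FilterSketch {
    public static void main(String[] args) {
        // A toy source document shaped like an index_stats export.
        Map<String, Object> docs = new HashMap<>();
        docs.put("count", 42);
        Map<String, Object> primaries = new HashMap<>();
        primaries.put("docs", docs);
        Map<String, Object> indexStats = new HashMap<>();
        indexStats.put("index", "test");
        indexStats.put("primaries", primaries);
        Map<String, Object> source = new HashMap<>();
        source.put("index_stats", indexStats);

        // Include paths written in the same dotted style as FILTERS above.
        String[] includes = { "index_stats.index", "index_stats.primaries.docs.count" };
        Map<String, Object> filtered = XContentMapValues.filter(source, includes, Strings.EMPTY_ARRAY);
        System.out.println(filtered);   // only the included paths survive
    }
}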

View File

@ -0,0 +1,83 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.renderer.licenses;
import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.license.core.License;
import org.elasticsearch.marvel.agent.collector.licenses.LicensesMarvelDoc;
import org.elasticsearch.marvel.agent.renderer.AbstractRenderer;
import java.io.IOException;
import java.util.List;
public class LicensesRenderer extends AbstractRenderer<LicensesMarvelDoc> {
public LicensesRenderer() {
super(Strings.EMPTY_ARRAY, false);
}
@Override
protected void doRender(LicensesMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.field(Fields.CLUSTER_NAME, marvelDoc.getClusterName());
builder.field(Fields.VERSION, marvelDoc.getVersion());
builder.startArray(Fields.LICENSES);
List<License> licenses = marvelDoc.getLicenses();
if (licenses != null) {
for (License license : licenses) {
builder.startObject();
builder.field(Fields.STATUS, license.status().label());
builder.field(Fields.UID, license.uid());
builder.field(Fields.TYPE, license.type());
builder.dateValueField(Fields.ISSUE_DATE_IN_MILLIS, Fields.ISSUE_DATE, license.issueDate());
builder.field(Fields.FEATURE, license.feature());
builder.dateValueField(Fields.EXPIRY_DATE_IN_MILLIS, Fields.EXPIRY_DATE, license.expiryDate());
builder.field(Fields.MAX_NODES, license.maxNodes());
builder.field(Fields.ISSUED_TO, license.issuedTo());
builder.field(Fields.ISSUER, license.issuer());
builder.field(Fields.HKEY, hash(license, marvelDoc.clusterUUID()));
builder.endObject();
}
}
builder.endArray();
}
public static String hash(License license, String clusterName) {
return hash(license.status().label(), license.uid(), license.type(), String.valueOf(license.expiryDate()), clusterName);
}
public static String hash(String licenseStatus, String licenseUid, String licenseType, String licenseExpiryDate, String clusterUUID) {
String toHash = licenseStatus + licenseUid + licenseType + licenseExpiryDate + clusterUUID;
return Hashing.sha256().hashString(toHash, Charsets.UTF_8).toString();
}
static final class Fields {
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
static final XContentBuilderString LICENSES = new XContentBuilderString("licenses");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString HKEY = new XContentBuilderString("hkey");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString UID = new XContentBuilderString("uid");
static final XContentBuilderString TYPE = new XContentBuilderString("type");
static final XContentBuilderString FEATURE = new XContentBuilderString("feature");
static final XContentBuilderString ISSUE_DATE_IN_MILLIS = new XContentBuilderString("issue_date_in_millis");
static final XContentBuilderString ISSUE_DATE = new XContentBuilderString("issue_date");
static final XContentBuilderString EXPIRY_DATE_IN_MILLIS = new XContentBuilderString("expiry_date_in_millis");
static final XContentBuilderString EXPIRY_DATE = new XContentBuilderString("expiry_date");
static final XContentBuilderString MAX_NODES = new XContentBuilderString("max_nodes");
static final XContentBuilderString ISSUED_TO = new XContentBuilderString("issued_to");
static final XContentBuilderString ISSUER = new XContentBuilderString("issuer");
}
}
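The hkey written above is simply a SHA-256 digest of the concatenated license status, uid, type, expiry date and cluster UUID. A plain-JDK sketch of the same computation (the class name and sample values are made up):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class LicenseHashSketch {
    // Mirrors LicensesRenderer.hash(...): SHA-256 over status + uid + type + expiryDate + clusterUUID.
    static String hash(String status, String uid, String type, String expiryDate, String clusterUUID) throws Exception {
        String toHash = status + uid + type + expiryDate + clusterUUID;
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(toHash.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        // Illustrative values only; a real call would pass License fields and the cluster UUID.
        System.out.println(hash("active", "893361dc-9749-4997-93cb-802e3dxxxxxx", "trial",
                String.valueOf(1456790400000L), "y1PbmupNRPuWuotGHNq0Cw"));
    }
}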

View File

@ -16,7 +16,7 @@ import java.io.IOException;
public class NodeStatsRenderer extends AbstractRenderer<NodeStatsMarvelDoc> {
private static final String[] FILTERS = {
public static final String[] FILTERS = {
// Extra information
"node_stats.node_id",
"node_stats.node_master",
@ -32,6 +32,7 @@ public class NodeStatsRenderer extends AbstractRenderer<NodeStatsMarvelDoc> {
"node_stats.fs.total.total_in_bytes",
"node_stats.fs.total.free_in_bytes",
"node_stats.fs.total.available_in_bytes",
"node_stats.os.load_average",
"node_stats.process.max_file_descriptors",
"node_stats.process.open_file_descriptors",
"node_stats.jvm.mem.heap_used_in_bytes",
@ -55,20 +56,17 @@ public class NodeStatsRenderer extends AbstractRenderer<NodeStatsMarvelDoc> {
protected void doRender(NodeStatsMarvelDoc marvelDoc, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.NODE_STATS);
NodeStatsMarvelDoc.Payload payload = marvelDoc.payload();
if (payload != null) {
builder.field(Fields.NODE_ID, marvelDoc.getNodeId());
builder.field(Fields.NODE_MASTER, marvelDoc.isNodeMaster());
builder.field(Fields.MLOCKALL, marvelDoc.isMlockall());
builder.field(Fields.DISK_THRESHOLD_ENABLED, marvelDoc.isDiskThresholdDeciderEnabled());
builder.field(Fields.DISK_THRESHOLD_WATERMARK_HIGH, marvelDoc.getDiskThresholdWaterMarkHigh());
builder.field(Fields.NODE_ID, payload.getNodeId());
builder.field(Fields.NODE_MASTER, payload.isNodeMaster());
builder.field(Fields.MLOCKALL, payload.isMlockall());
builder.field(Fields.DISK_THRESHOLD_ENABLED, payload.isDiskThresholdDeciderEnabled());
builder.field(Fields.DISK_THRESHOLD_WATERMARK_HIGH, payload.getDiskThresholdWaterMarkHigh());
NodeStats nodeStats = payload.getNodeStats();
if (nodeStats != null) {
nodeStats.toXContent(builder, params);
}
NodeStats nodeStats = marvelDoc.getNodeStats();
if (nodeStats != null) {
nodeStats.toXContent(builder, params);
}
builder.endObject();
}

View File

@ -6,11 +6,13 @@
package org.elasticsearch.marvel.agent.settings;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.marvel.agent.AgentService;
public class MarvelSettingsModule extends AbstractModule {
public class MarvelModule extends AbstractModule {
@Override
protected void configure() {
bind(MarvelSettingsService.class).asEagerSingleton();
bind(MarvelSettings.class).asEagerSingleton();
bind(AgentService.class).asEagerSingleton();
}
}

View File

@ -6,26 +6,28 @@
package org.elasticsearch.marvel.agent.settings;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Arrays;
public abstract class MarvelSetting<V> {
private final String name;
private final String description;
private final V defaultValue;
private final boolean dynamic;
private volatile V value;
MarvelSetting(String name, String description, V defaultValue) {
MarvelSetting(String name, String description, V defaultValue, boolean dynamic) {
this.name = name;
this.description = description;
this.defaultValue = defaultValue;
this.value = defaultValue;
this.dynamic = dynamic;
}
abstract boolean onInit(Settings settings);
abstract boolean onRefresh(Settings settings);
public String getName() {
@ -36,15 +38,11 @@ public abstract class MarvelSetting<V> {
return description;
}
public V getDefaultValue() {
return defaultValue;
}
public V getValue() {
return value;
}
public void setValue(V value) {
public synchronized void setValue(V value) {
this.value = value;
}
@ -53,39 +51,46 @@ public abstract class MarvelSetting<V> {
}
public boolean isDynamic() {
return true;
return dynamic;
}
public String dynamicSettingName() {
return getName();
}
public static BooleanSetting booleanSetting(String name, Boolean defaultValue, String description) {
return new BooleanSetting(name, description, defaultValue);
public Validator dynamicValidator() {
return Validator.EMPTY;
}
public static StringSetting stringSetting(String name, String defaultValue, String description) {
return new StringSetting(name, description, defaultValue);
@Override
public String toString() {
return "marvel setting [" + getName() + " : " + getValueAsString() + "]";
}
public static StringArraySetting arraySetting(String name, String[] defaultValue, String description) {
return new StringArraySetting(name, description, defaultValue);
public static BooleanSetting booleanSetting(String name, Boolean defaultValue, String description, boolean dynamic) {
return new BooleanSetting(name, description, defaultValue, dynamic);
}
public static TimeValueSetting timeSetting(String name, TimeValue defaultValue, String description) {
return new TimeValueSetting(name, description, defaultValue);
public static StringSetting stringSetting(String name, String defaultValue, String description, boolean dynamic) {
return new StringSetting(name, description, defaultValue, dynamic);
}
public static StringArraySetting arraySetting(String name, String[] defaultValue, String description, boolean dynamic) {
return new StringArraySetting(name, description, defaultValue, dynamic);
}
public static TimeValueSetting timeSetting(String name, TimeValue defaultValue, String description, boolean dynamic) {
return new TimeValueSetting(name, description, defaultValue, dynamic);
}
public static TimeoutValueSetting timeoutSetting(String name, TimeValue defaultTimeoutValue, String description, boolean dynamic) {
return new TimeoutValueSetting(name, description, defaultTimeoutValue, dynamic);
}
static class BooleanSetting extends MarvelSetting<Boolean> {
BooleanSetting(String name, String description, Boolean defaultValue) {
super(name, description, defaultValue);
}
@Override
boolean onInit(Settings settings) {
setValue(settings.getAsBoolean(getName(), getDefaultValue()));
return true;
BooleanSetting(String name, String description, Boolean defaultValue, boolean dynamic) {
super(name, description, defaultValue, dynamic);
}
@Override
@ -97,18 +102,17 @@ public abstract class MarvelSetting<V> {
}
return false;
}
@Override
public Validator dynamicValidator() {
return Validator.BOOLEAN;
}
}
static class StringSetting extends MarvelSetting<String> {
StringSetting(String name, String description, String defaultValue) {
super(name, description, defaultValue);
}
@Override
boolean onInit(Settings settings) {
setValue(settings.get(getName(), getDefaultValue()));
return true;
StringSetting(String name, String description, String defaultValue, boolean dynamic) {
super(name, description, defaultValue, dynamic);
}
@Override
@ -124,29 +128,14 @@ public abstract class MarvelSetting<V> {
static class StringArraySetting extends MarvelSetting<String[]> {
StringArraySetting(String name, String description, String[] defaultValue) {
super(name, description, defaultValue);
}
@Override
boolean onInit(Settings settings) {
String[] a;
if (getDefaultValue() != null) {
a = settings.getAsArray(getName(), getDefaultValue(), true);
} else {
a = settings.getAsArray(getName());
}
if (a != null) {
setValue(a);
return true;
}
return false;
StringArraySetting(String name, String description, String[] defaultValue, boolean dynamic) {
super(name, description, defaultValue, dynamic);
}
@Override
boolean onRefresh(Settings settings) {
String[] updated = settings.getAsArray(getName(), null);
if (updated != null) {
if ((updated != null) && (!Arrays.equals(updated, getValue()))) {
setValue(updated);
return true;
}
@ -167,18 +156,8 @@ public abstract class MarvelSetting<V> {
static class TimeValueSetting extends MarvelSetting<TimeValue> {
TimeValueSetting(String name, String description, TimeValue defaultValue) {
super(name, description, defaultValue);
}
@Override
boolean onInit(Settings settings) {
TimeValue t = get(settings, getDefaultValue());
if (t != null) {
setValue(t);
return true;
}
return false;
TimeValueSetting(String name, String description, TimeValue defaultValue, boolean dynamic) {
super(name, description, defaultValue, dynamic);
}
@Override
@ -205,5 +184,22 @@ public abstract class MarvelSetting<V> {
}
return null;
}
@Override
public Validator dynamicValidator() {
return Validator.TIME;
}
}
static class TimeoutValueSetting extends TimeValueSetting {
TimeoutValueSetting(String name, String description, TimeValue defaultValue, boolean dynamic) {
super(name, description, defaultValue, dynamic);
}
@Override
public Validator dynamicValidator() {
return Validator.TIMEOUT;
}
}
}
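A short sketch of how the refactored settings are meant to be used, assuming it lives in the org.elasticsearch.marvel.agent.settings package (onRefresh is package-private); the class name, setting value and printed results are illustrative.

package org.elasticsearch.marvel.agent.settings;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class MarvelSettingSketch {
    public static void main(String[] args) {
        // A dynamic time setting with a 10s default, created through the factory method above.
        MarvelSetting<TimeValue> interval = MarvelSetting.timeSetting(
                "marvel.agent.interval", TimeValue.timeValueSeconds(10),
                "Sampling interval between two collections (default to 10s)", true);

        System.out.println(interval.getValue());          // 10s (the default)

        // Simulate a dynamic cluster settings update; onRefresh should report the change.
        Settings update = Settings.settingsBuilder().put("marvel.agent.interval", "30s").build();
        boolean changed = interval.onRefresh(update);
        System.out.println(changed + " -> " + interval.getValueAsString());   // expected: true -> 30s
    }
}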

View File

@ -0,0 +1,179 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.settings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.*;
import static org.elasticsearch.marvel.agent.settings.MarvelSetting.*;
public class MarvelSettings extends AbstractComponent implements NodeSettingsService.Listener {
private static final String PREFIX = MarvelPlugin.NAME + ".agent.";
public static final String MARVEL_DATA_INDEX_NAME = ".marvel-data";
public static final TimeValue MAX_LICENSE_GRACE_PERIOD = TimeValue.timeValueHours(7 * 24);
public static final String INTERVAL = PREFIX + "interval";
public static final String STARTUP_DELAY = PREFIX + "startup.delay";
public static final String INDEX_STATS_TIMEOUT = PREFIX + "index.stats.timeout";
public static final String INDICES = PREFIX + "indices";
public static final String CLUSTER_STATE_TIMEOUT = PREFIX + "cluster.state.timeout";
public static final String CLUSTER_STATS_TIMEOUT = PREFIX + "cluster.stats.timeout";
public static final String INDEX_RECOVERY_TIMEOUT = PREFIX + "index.recovery.timeout";
public static final String INDEX_RECOVERY_ACTIVE_ONLY = PREFIX + "index.recovery.active_only";
public static final String COLLECTORS = PREFIX + "collectors";
public static final String LICENSE_GRACE_PERIOD = PREFIX + "license.grace.period";
private static Map<String, ? extends MarvelSetting> MARVEL_SETTINGS = Collections.EMPTY_MAP;
static {
Map<String, MarvelSetting> map = new HashMap<>();
map.put(INTERVAL, timeSetting(INTERVAL, TimeValue.timeValueSeconds(10),
"Sampling interval between two collections (default to 10s)", true));
map.put(STARTUP_DELAY, timeSetting(STARTUP_DELAY, null,
"Waiting time before the agent start to collect data (default to sampling interval)", false));
map.put(INDEX_STATS_TIMEOUT, timeoutSetting(INDEX_STATS_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting indices statistics (default to 10m)", true));
map.put(INDICES, arraySetting(INDICES, Strings.EMPTY_ARRAY,
"List of indices names whose stats will be exported (default to all indices)", true));
map.put(CLUSTER_STATE_TIMEOUT, timeoutSetting(CLUSTER_STATE_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the cluster state (default to 10m)", true));
map.put(CLUSTER_STATS_TIMEOUT, timeoutSetting(CLUSTER_STATS_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the cluster statistics (default to 10m)", true));
map.put(INDEX_RECOVERY_TIMEOUT, timeoutSetting(INDEX_RECOVERY_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the recovery information (default to 10m)", true));
map.put(INDEX_RECOVERY_ACTIVE_ONLY, booleanSetting(INDEX_RECOVERY_ACTIVE_ONLY, Boolean.FALSE,
"INDEX_RECOVERY_ACTIVE_ONLY to indicate if only active recoveries should be collected (default to false: all recoveries are collected)", true));
map.put(COLLECTORS, arraySetting(COLLECTORS, Strings.EMPTY_ARRAY,
"List of collectors allowed to collect data (default to all)", false));
map.put(LICENSE_GRACE_PERIOD, timeSetting(LICENSE_GRACE_PERIOD, MAX_LICENSE_GRACE_PERIOD,
"Period during which the agent continues to collect data even if the license is expired (default to 7 days, cannot be greater than 7 days)", false));
MARVEL_SETTINGS = Collections.unmodifiableMap(map);
}
@Inject
public MarvelSettings(Settings clusterSettings, NodeSettingsService nodeSettingsService) {
super(clusterSettings);
logger.trace("initializing marvel settings:");
updateSettings(clusterSettings, false);
logger.trace("registering the service as a node settings listener");
nodeSettingsService.addListener(this);
}
@Override
public void onRefreshSettings(Settings clusterSettings) {
if (clusterSettings.names() == null || clusterSettings.names().isEmpty()) {
return;
}
updateSettings(clusterSettings, true);
}
private synchronized void updateSettings(Settings clusterSettings, boolean dynamicOnly) {
for (MarvelSetting setting : settings()) {
if (!dynamicOnly || setting.isDynamic()) {
if (setting.onRefresh(clusterSettings)) {
logger.info("{} updated", setting);
}
}
}
}
/**
* Returns the setting corresponding to the given name
*
* @param name The given name
* @return The associated setting; throws an IllegalArgumentException if no setting is registered under that name
*/
synchronized MarvelSetting getSetting(String name) {
MarvelSetting setting = MARVEL_SETTINGS.get(name);
if (setting == null) {
throw new IllegalArgumentException("no marvel setting initialized for [" + name + "]");
}
return setting;
}
/**
* Returns the value of the setting corresponding to the given name
*
* @param name The given name
* @return The associated setting value
*/
<T> T getSettingValue(String name) {
MarvelSetting setting = getSetting(name);
if (setting == null) {
throw new IllegalArgumentException("no marvel setting initialized for [" + name + "]");
}
return (T) setting.getValue();
}
public static Collection<? extends MarvelSetting> settings() {
return MARVEL_SETTINGS.values();
}
public static synchronized Collection<MarvelSetting> dynamicSettings() {
List<MarvelSetting> list = new ArrayList<>();
for (MarvelSetting setting : settings()) {
if (setting.isDynamic()) {
list.add(setting);
}
}
return list;
}
public TimeValue interval() {
return getSettingValue(INTERVAL);
}
public TimeValue startUpDelay() {
return getSettingValue(STARTUP_DELAY);
}
public TimeValue indexStatsTimeout() {
return getSettingValue(INDEX_STATS_TIMEOUT);
}
public String[] indices() {
return getSettingValue(INDICES);
}
public TimeValue clusterStateTimeout() {
return getSettingValue(CLUSTER_STATE_TIMEOUT);
}
public TimeValue clusterStatsTimeout() {
return getSettingValue(CLUSTER_STATS_TIMEOUT);
}
public TimeValue recoveryTimeout() {
return getSettingValue(INDEX_RECOVERY_TIMEOUT);
}
public boolean recoveryActiveOnly() {
return getSettingValue(INDEX_RECOVERY_ACTIVE_ONLY);
}
public String[] collectors() {
return getSettingValue(COLLECTORS);
}
public TimeValue licenseExpirationGracePeriod() {
TimeValue delay = getSettingValue(LICENSE_GRACE_PERIOD);
if ((delay.millis() >= 0) && (delay.millis() < MAX_LICENSE_GRACE_PERIOD.millis())) {
return delay;
}
return MAX_LICENSE_GRACE_PERIOD;
}
}
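Because most of these settings are registered as dynamic, they can be changed at runtime through the standard cluster update settings API; a sketch, assuming a connected Client instance (the class and helper names are illustrative):

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;

public class UpdateMarvelIntervalSketch {
    // Pushes a transient cluster setting; each node's MarvelSettings.onRefreshSettings picks it up.
    static void setCollectionInterval(Client client, String interval) {
        client.admin().cluster().prepareUpdateSettings()
                .setTransientSettings(Settings.settingsBuilder()
                        .put(MarvelSettings.INTERVAL, interval)   // "marvel.agent.interval"
                        .build())
                .get();
    }
}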

View File

@ -1,125 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.settings;
import com.google.common.collect.ImmutableList;
import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.settings.MarvelSetting.StringArraySetting;
import org.elasticsearch.marvel.agent.settings.MarvelSetting.TimeValueSetting;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.List;
public class MarvelSettingsService extends AbstractComponent implements NodeSettingsService.Listener {
private static final String PREFIX = MarvelPlugin.NAME + ".agent.";
private final List<MarvelSetting> settings;
public static final String INDEX_STATS_TIMEOUT = PREFIX + "index.stats.timeout";
final TimeValueSetting indexStatsTimeout = MarvelSetting.timeSetting(INDEX_STATS_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting indices statistics (default to 10m)");
public static final String INDICES = PREFIX + "indices";
final StringArraySetting indices = MarvelSetting.arraySetting(INDICES, Strings.EMPTY_ARRAY,
"List of indices names whose stats will be exported (default to all indices)");
public static final String CLUSTER_STATE_TIMEOUT = PREFIX + "cluster.state.timeout";
final TimeValueSetting clusterStateTimeout = MarvelSetting.timeSetting(CLUSTER_STATE_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the cluster state (default to 10m)");
public static final String CLUSTER_STATS_TIMEOUT = PREFIX + "cluster.stats.timeout";
final TimeValueSetting clusterStatsTimeout = MarvelSetting.timeSetting(CLUSTER_STATS_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the cluster statistics (default to 10m)");
public static final String INDEX_RECOVERY_TIMEOUT = PREFIX + "index.recovery.timeout";
final TimeValueSetting recoveryTimeout = MarvelSetting.timeSetting(INDEX_RECOVERY_TIMEOUT, TimeValue.timeValueMinutes(10),
"Timeout value when collecting the recovery information (default to 10m)");
public static final String INDEX_RECOVERY_ACTIVE_ONLY = PREFIX + "index.recovery.active_only";
final MarvelSetting.BooleanSetting recoveryActiveOnly = MarvelSetting.booleanSetting(INDEX_RECOVERY_ACTIVE_ONLY, Boolean.FALSE,
"Flag to indicate if only active recoveries should be collected (default to false: all recoveries are collected)");
MarvelSettingsService(Settings clusterSettings) {
super(clusterSettings);
// List of marvel settings
ImmutableList.Builder<MarvelSetting> builder = ImmutableList.builder();
builder.add(indexStatsTimeout);
builder.add(indices);
builder.add(clusterStateTimeout);
builder.add(clusterStatsTimeout);
builder.add(recoveryTimeout);
builder.add(recoveryActiveOnly);
this.settings = builder.build();
logger.trace("initializing marvel settings:");
for (MarvelSetting setting : settings) {
// Initialize all settings and register them as a dynamic settings
if (setting.onInit(clusterSettings)) {
logger.trace("\t{} ({}) initialized to [{}]", setting.getName(), setting.getDescription(), setting.getValueAsString());
} else {
logger.trace("\t{} ({}) initialized", setting.getName(), setting.getDescription());
}
}
}
@Inject
public MarvelSettingsService(Settings clusterSettings, NodeSettingsService nodeSettingsService, @ClusterDynamicSettings DynamicSettings dynamicSettings) {
this(clusterSettings);
logger.trace("registering dynamic marvel settings:");
for (MarvelSetting setting : settings) {
if (setting.isDynamic()) {
logger.trace("dynamic setting [{}] registered", setting.getName());
dynamicSettings.addDynamicSetting(setting.dynamicSettingName());
}
}
logger.trace("registering the service as a node settings listener");
nodeSettingsService.addListener(this);
}
@Override
public void onRefreshSettings(Settings clusterSettings) {
for (MarvelSetting setting : settings) {
if (setting.onRefresh(clusterSettings)) {
logger.trace("setting [{}] updated to [{}]", setting.getName(), setting.getValueAsString());
}
}
}
public TimeValue indexStatsTimeout() {
return indexStatsTimeout.getValue();
}
public String[] indices() {
return indices.getValue();
}
public TimeValue clusterStateTimeout() {
return clusterStateTimeout.getValue();
}
public TimeValue clusterStatsTimeout() {
return clusterStatsTimeout.getValue();
}
public TimeValue recoveryTimeout() {
return recoveryTimeout.getValue();
}
public boolean recoveryActiveOnly() {
return recoveryActiveOnly.getValue();
}
}

View File

@ -7,6 +7,7 @@ package org.elasticsearch.marvel.agent.support;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -34,9 +35,9 @@ public class AgentUtils {
InetSocketAddress inetSocketAddress = address.address();
InetAddress inetAddress = inetSocketAddress.getAddress();
if (inetAddress != null) {
builder.field("ip", inetAddress.getHostAddress());
builder.field("host", inetAddress.getHostName());
builder.field("ip_port", inetAddress.getHostAddress() + ":" + inetSocketAddress.getPort());
builder.field("ip", NetworkAddress.formatAddress(inetAddress));
builder.field("host", inetSocketAddress.getHostString());
builder.field("ip_port", NetworkAddress.formatAddress(inetSocketAddress));
}
} else if (node.address().uniqueAddressTypeId() == 2) { // local transport
builder.field("ip_port", "_" + node.address()); // will end up being "_local[ID]"
@ -58,19 +59,6 @@ public class AgentUtils {
return builder;
}
public static String nodeDescription(DiscoveryNode node) {
StringBuilder builder = new StringBuilder().append("[").append(node.name()).append("]");
if (node.address().uniqueAddressTypeId() == 1) { // InetSocket
InetSocketTransportAddress address = (InetSocketTransportAddress) node.address();
InetSocketAddress inetSocketAddress = address.address();
InetAddress inetAddress = inetSocketAddress.getAddress();
if (inetAddress != null) {
builder.append("[").append(inetAddress.getHostAddress()).append(":").append(inetSocketAddress.getPort()).append("]");
}
}
return builder.toString();
}
public static String[] extractHostsFromAddress(BoundTransportAddress boundAddress, ESLogger logger) {
if (boundAddress == null || boundAddress.boundAddress() == null) {
logger.debug("local http server is not yet started. can't connect");
@ -89,13 +77,7 @@ public class AgentUtils {
return null;
}
String host = inetAddress.getHostAddress();
if (host.indexOf(":") >= 0) {
// ipv6
host = "[" + host + "]";
}
return new String[]{host + ":" + inetSocketAddress.getPort()};
return new String[]{ NetworkAddress.formatAddress(inetSocketAddress) };
}
public static URL parseHostWithPath(String host, String path) throws URISyntaxException, MalformedURLException {
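For reference, the manual formatting that NetworkAddress.formatAddress replaces above amounts to bracketing IPv6 literals before appending the port; a plain-JDK approximation (not the actual NetworkAddress implementation, and the class name is made up):

import java.net.InetSocketAddress;

public class HostPortFormatSketch {
    // Approximates "ip:port" formatting with IPv6 literals wrapped in brackets,
    // which is what the removed code above did by hand.
    static String format(InetSocketAddress address) {
        String host = address.getAddress().getHostAddress();
        if (host.indexOf(':') >= 0) {          // IPv6 literal
            host = "[" + host + "]";
        }
        return host + ":" + address.getPort();
    }

    public static void main(String[] args) {
        System.out.println(format(new InetSocketAddress("127.0.0.1", 9200)));   // 127.0.0.1:9200
        System.out.println(format(new InetSocketAddress("::1", 9200)));         // e.g. [0:0:0:0:0:0:0:1]:9200
    }
}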

View File

@ -5,10 +5,7 @@
*/
package org.elasticsearch.marvel.license;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.license.plugin.LicenseVersion;
import org.elasticsearch.marvel.MarvelVersion;
public class LicenseModule extends AbstractModule {
@ -27,11 +24,5 @@ public class LicenseModule extends AbstractModule {
} catch (ClassNotFoundException cnfe) {
throw new IllegalStateException("marvel plugin requires the license plugin to be installed");
}
if (LicenseVersion.CURRENT.before(MarvelVersion.CURRENT.minLicenseCompatibilityVersion)) {
throw new ElasticsearchException("marvel [" + MarvelVersion.CURRENT +
"] requires minimum license plugin version [" + MarvelVersion.CURRENT.minLicenseCompatibilityVersion +
"], but installed license plugin version is [" + LicenseVersion.CURRENT + "]");
}
}
}

View File

@ -14,13 +14,13 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.license.core.License;
import org.elasticsearch.license.plugin.core.LicensesClientService;
import org.elasticsearch.license.plugin.core.LicensesManagerService;
import org.elasticsearch.license.plugin.core.LicensesService;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.mode.Mode;
import java.util.Arrays;
import java.util.Collection;
import java.util.Locale;
import java.util.*;
public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
@ -31,15 +31,22 @@ public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
private static final FormatDateTimeFormatter DATE_FORMATTER = Joda.forPattern("EEEE, MMMMM dd, yyyy", Locale.ROOT);
private final LicensesManagerService managerService;
private final LicensesClientService clientService;
private final MarvelSettings marvelSettings;
private final Collection<LicensesService.ExpirationCallback> expirationLoggers;
private final LicensesClientService.AcknowledgementCallback acknowledgementCallback;
private volatile Mode mode;
private volatile boolean enabled;
private volatile long expiryDate;
@Inject
public LicenseService(Settings settings, LicensesClientService clientService) {
public LicenseService(Settings settings, LicensesClientService clientService, LicensesManagerService managerService, MarvelSettings marvelSettings) {
super(settings);
this.managerService = managerService;
this.clientService = clientService;
this.marvelSettings = marvelSettings;
this.mode = Mode.LITE;
this.expirationLoggers = Arrays.asList(
new LicensesService.ExpirationCallback.Pre(days(7), days(30), days(1)) {
@ -73,11 +80,20 @@ public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
}
}
);
this.acknowledgementCallback = new LicensesClientService.AcknowledgementCallback() {
@Override
public List<String> acknowledge(License currentLicense, License newLicense) {
// TODO: add messages to be acknowledged when installing newLicense from currentLicense
// NOTE: currentLicense can be null, as a license registration can happen before
// a trial license has been generated
return Collections.emptyList();
}
};
}
@Override
protected void doStart() throws ElasticsearchException {
clientService.register(FEATURE_NAME, TRIAL_LICENSE_OPTIONS, expirationLoggers, new InternalListener(this));
clientService.register(FEATURE_NAME, TRIAL_LICENSE_OPTIONS, expirationLoggers, acknowledgementCallback, new InternalListener(this));
}
@Override
@ -103,6 +119,35 @@ public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
return mode;
}
/**
* @return all registered licenses
*/
public List<License> licenses() {
return managerService.getLicenses();
}
/**
* @return true if the marvel license is enabled
*/
public boolean enabled() {
return enabled;
}
/**
* @return true if marvel is running within the "grace period", i.e. when the license
* has expired but the configured extra delay has not yet elapsed
*/
public boolean inExpirationGracePeriod() {
return System.currentTimeMillis() <= (expiryDate() + marvelSettings.licenseExpirationGracePeriod().millis());
}
/**
* @return the license's expiration date (as a long)
*/
public long expiryDate() {
return expiryDate;
}
class InternalListener implements LicensesClientService.Listener {
private final LicenseService service;
@ -114,6 +159,8 @@ public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
@Override
public void onEnabled(License license) {
try {
service.enabled = true;
service.expiryDate = license.expiryDate();
service.mode = Mode.fromName(license.type());
} catch (IllegalArgumentException e) {
service.mode = Mode.LITE;
@ -122,6 +169,8 @@ public class LicenseService extends AbstractLifecycleComponent<LicenseService> {
@Override
public void onDisabled(License license) {
service.enabled = false;
service.expiryDate = license.expiryDate();
service.mode = Mode.LITE;
}
}
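The grace-period handling added above reduces to one comparison: collection stays allowed while the current time has not passed the license expiry plus the configured delay. A self-contained sketch with made-up values and an illustrative class name:

import java.util.concurrent.TimeUnit;

public class GracePeriodSketch {
    // Mirrors LicenseService.inExpirationGracePeriod(): still within the grace window
    // as long as "now" has not passed expiryDate + gracePeriod.
    static boolean inExpirationGracePeriod(long nowMillis, long expiryDateMillis, long gracePeriodMillis) {
        return nowMillis <= expiryDateMillis + gracePeriodMillis;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long sevenDays = TimeUnit.DAYS.toMillis(7);   // same length as MarvelSettings.MAX_LICENSE_GRACE_PERIOD
        System.out.println(inExpirationGracePeriod(now, now - TimeUnit.DAYS.toMillis(2), sevenDays));   // true: expired 2 days ago
        System.out.println(inExpirationGracePeriod(now, now - TimeUnit.DAYS.toMillis(10), sevenDays));  // false: expired 10 days ago
    }
}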

View File

@ -15,7 +15,7 @@
},
"date_detection": false,
"properties": {
"cluster_name": {
"cluster_uuid": {
"type": "string",
"index": "not_analyzed"
},
@ -33,6 +33,17 @@
"type": "string",
"index": "not_analyzed"
},
"primaries": {
"properties": {
"docs": {
"properties": {
"count": {
"type": "long"
}
}
}
}
},
"total": {
"properties": {
"docs": {
@ -54,10 +65,47 @@
},
"indexing": {
"properties": {
"index_total": {
"type": "long"
},
"index_time_in_millis": {
"type": "long"
},
"throttle_time_in_millis": {
"type": "long"
}
}
},
"merges": {
"properties": {
"total_size_in_bytes": {
"type": "long"
}
}
},
"search": {
"properties": {
"query_total": {
"type": "long"
},
"query_time_in_millis": {
"type": "long"
}
}
},
"segments": {
"properties": {
"memory_in_bytes": {
"type": "long"
}
}
},
"refresh": {
"properties": {
"total_time_in_millis": {
"type": "long"
}
}
}
}
}
@ -104,6 +152,9 @@
}
}
},
"cluster_licenses": {
"enabled": false
},
"marvel_node_stats": {
"properties": {
"node_stats": {

View File

@ -0,0 +1,29 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel;
import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.license.plugin.LicensePlugin;
/**
Main class to easily run Marvel from an IDE.
<p/>
In order to run this class, configure the following:
* 1) Set `-Des.path.home=` to a directory containing an ES config directory
*/
public class MarvelLauncher {
public static void main(String[] args) throws Throwable {
System.setProperty("es.script.inline", "on");
System.setProperty("es.security.manager.enabled", "false");
System.setProperty("es.plugins.load_classpath_plugins", "false");
System.setProperty("es.plugin.types", MarvelPlugin.class.getName() + "," + LicensePlugin.class.getName());
System.setProperty("es.cluster.name", MarvelLauncher.class.getSimpleName());
Elasticsearch.main(new String[]{"start"});
}
}

View File

@ -26,7 +26,7 @@ public class MarvelPluginClientTests extends ESTestCase {
MarvelPlugin plugin = new MarvelPlugin(settings);
assertThat(plugin.isEnabled(), is(false));
Collection<Class<? extends Module>> modules = plugin.modules();
Collection<Module> modules = plugin.nodeModules();
assertThat(modules.size(), is(0));
}
@ -38,8 +38,8 @@ public class MarvelPluginClientTests extends ESTestCase {
.build();
MarvelPlugin plugin = new MarvelPlugin(settings);
assertThat(plugin.isEnabled(), is(true));
Collection<Class<? extends Module>> modules = plugin.modules();
assertThat(modules.size(), is(1));
Collection<Module> modules = plugin.nodeModules();
assertThat(modules.size(), is(5));
}
}

View File

@ -1,24 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import static org.hamcrest.Matchers.equalTo;
public class MarvelVersionTests extends ESTestCase {
@Test
public void testVersionFromString() {
assertThat(MarvelVersion.fromString("2.0.0-beta1"), equalTo(MarvelVersion.V_2_0_0_Beta1));
}
@Test
public void testVersionNumber() {
assertThat(MarvelVersion.V_2_0_0_Beta1.number(), equalTo("2.0.0-beta1"));
}
}

View File

@ -0,0 +1,204 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.collector;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.SysGlobals;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.license.core.License;
import org.elasticsearch.license.plugin.core.LicensesClientService;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseIntegrationTests;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.junit.Before;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
@ClusterScope(scope = ESIntegTestCase.Scope.SUITE, randomDynamicTemplates = false, transportClientRatio = 0.0)
public class AbstractCollectorTestCase extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.settingsBuilder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", MarvelPlugin.class.getName() + "," + LicensePluginForCollectors.class.getName())
.build();
}
@Before
public void ensureLicenseIsEnabled() {
enableLicense();
}
protected void assertCanCollect(AbstractCollector collector) {
assertNotNull(collector);
assertTrue("collector [" + collector.name() + "] should be able to collect data", collector.canCollect());
Collection results = collector.collect();
assertTrue(results != null && !results.isEmpty());
}
protected void assertCannotCollect(AbstractCollector collector) {
assertNotNull(collector);
assertFalse("collector [" + collector.name() + "] should not be able to collect data", collector.canCollect());
Collection results = collector.collect();
assertTrue(results == null || results.isEmpty());
}
private static License createTestingLicense(long issueDate, long expiryDate) {
return License.builder()
.feature(LicenseService.FEATURE_NAME)
.expiryDate(expiryDate)
.issueDate(issueDate)
.issuedTo("AbstractCollectorTestCase")
.issuer("test")
.maxNodes(Integer.MAX_VALUE)
.signature("_signature")
.type("standard")
.subscriptionType("all_is_good")
.uid(String.valueOf(RandomizedTest.systemPropertyAsInt(SysGlobals.CHILDVM_SYSPROP_JVM_ID, 0)) + System.identityHashCode(LicenseIntegrationTests.class))
.build();
}
protected static void enableLicense() {
long issueDate = System.currentTimeMillis();
long expiryDate = issueDate + randomDaysInMillis();
final License license = createTestingLicense(issueDate, expiryDate);
for (LicenseServiceForCollectors service : internalCluster().getInstances(LicenseServiceForCollectors.class)) {
service.enable(license);
}
}
protected static void beginGracefulPeriod() {
long expiryDate = System.currentTimeMillis() + TimeValue.timeValueMinutes(10).millis();
long issueDate = expiryDate - randomDaysInMillis();
final License license = createTestingLicense(issueDate, expiryDate);
for (LicenseServiceForCollectors service : internalCluster().getInstances(LicenseServiceForCollectors.class)) {
service.disable(license);
}
}
protected static void endGracefulPeriod() {
long expiryDate = System.currentTimeMillis() - MarvelSettings.MAX_LICENSE_GRACE_PERIOD.millis() - TimeValue.timeValueMinutes(10).millis();
long issueDate = expiryDate - randomDaysInMillis();
final License license = createTestingLicense(issueDate, expiryDate);
for (LicenseServiceForCollectors service : internalCluster().getInstances(LicenseServiceForCollectors.class)) {
service.disable(license);
}
}
protected static void disableLicense() {
long expiryDate = System.currentTimeMillis() - MarvelSettings.MAX_LICENSE_GRACE_PERIOD.millis() - randomDaysInMillis();
long issueDate = expiryDate - randomDaysInMillis();
final License license = createTestingLicense(issueDate, expiryDate);
for (LicenseServiceForCollectors service : internalCluster().getInstances(LicenseServiceForCollectors.class)) {
service.disable(license);
}
}
private static long randomDaysInMillis() {
return TimeValue.timeValueHours(randomIntBetween(1, 30) * 24).millis();
}
public void waitForNoBlocksOnNodes() throws Exception {
assertBusy(new Runnable() {
@Override
public void run() {
for (String nodeId : internalCluster().getNodeNames()) {
try {
assertTrue(waitForNoBlocksOnNode(nodeId));
} catch (Exception e) {
fail("failed to wait for no blocks on node [" + nodeId + "]: " + e.getMessage());
}
}
}
});
}
public boolean waitForNoBlocksOnNode(final String nodeId) throws Exception {
return assertBusy(new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
ClusterBlocks clusterBlocks = client(nodeId).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState().blocks();
assertTrue(clusterBlocks.global().isEmpty());
assertTrue(clusterBlocks.indices().values().isEmpty());
return true;
}
}, 30L, TimeUnit.SECONDS);
}
public static class LicensePluginForCollectors extends Plugin {
public static final String NAME = "internal-test-licensing";
@Override
public String name() {
return NAME;
}
@Override
public String description() {
return name();
}
@Override
public Collection<Module> nodeModules() {
return Collections.<Module>singletonList(new AbstractModule(){
@Override
protected void configure() {
bind(LicenseServiceForCollectors.class).asEagerSingleton();
bind(LicensesClientService.class).to(LicenseServiceForCollectors.class);
}
});
}
}
public static class LicenseServiceForCollectors extends AbstractComponent implements LicensesClientService {
private final List<Listener> listeners = new ArrayList<>();
@Inject
public LicenseServiceForCollectors(Settings settings) {
super(settings);
}
@Override
public void register(String feature, TrialLicenseOptions trialLicenseOptions, Collection<ExpirationCallback> expirationCallbacks, AcknowledgementCallback acknowledgementCallback, Listener listener) {
listeners.add(listener);
}
public void enable(License license) {
for (Listener listener : listeners) {
listener.onEnabled(license);
}
}
public void disable(License license) {
for (Listener listener : listeners) {
listener.onDisabled(license);
}
}
}
}

View File

@ -5,22 +5,24 @@
*/
package org.elasticsearch.marvel.agent.collector.cluster;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollectorTestCase;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.junit.Test;
import java.util.Collection;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.*;
public class ClusterStateCollectorTests extends ESSingleNodeTestCase {
public class ClusterStateCollectorTests extends AbstractCollectorTestCase {
@Test
public void testClusterStateCollectorNoIndices() throws Exception {
@ -32,22 +34,22 @@ public class ClusterStateCollectorTests extends ESSingleNodeTestCase {
assertThat(marvelDoc, instanceOf(ClusterStateMarvelDoc.class));
ClusterStateMarvelDoc clusterStateMarvelDoc = (ClusterStateMarvelDoc) marvelDoc;
assertThat(clusterStateMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(clusterStateMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(clusterStateMarvelDoc.timestamp(), greaterThan(0L));
assertThat(clusterStateMarvelDoc.type(), equalTo(ClusterStateCollector.TYPE));
assertNotNull(clusterStateMarvelDoc.getClusterState());
ClusterStateMarvelDoc.Payload payload = clusterStateMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getClusterState());
ClusterState clusterState = payload.getClusterState();
ClusterState clusterState = clusterStateMarvelDoc.getClusterState();
assertThat(clusterState.getRoutingTable().allShards(), hasSize(0));
}
@Test
public void testClusterStateCollectorOneIndex() throws Exception {
int nbShards = randomIntBetween(1, 5);
createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, nbShards).build());
assertAcked(prepareCreate("test").setSettings(Settings.settingsBuilder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, nbShards)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build()));
int nbDocs = randomIntBetween(1, 20);
for (int i = 0; i < nbDocs; i++) {
@ -64,15 +66,13 @@ public class ClusterStateCollectorTests extends ESSingleNodeTestCase {
assertThat(marvelDoc, instanceOf(ClusterStateMarvelDoc.class));
ClusterStateMarvelDoc clusterStateMarvelDoc = (ClusterStateMarvelDoc) marvelDoc;
assertThat(clusterStateMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(clusterStateMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(clusterStateMarvelDoc.timestamp(), greaterThan(0L));
assertThat(clusterStateMarvelDoc.type(), equalTo(ClusterStateCollector.TYPE));
ClusterStateMarvelDoc.Payload payload = clusterStateMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getClusterState());
assertNotNull(clusterStateMarvelDoc.getClusterState());
ClusterState clusterState = payload.getClusterState();
ClusterState clusterState = clusterStateMarvelDoc.getClusterState();
assertThat(clusterState.getRoutingTable().allShards("test"), hasSize(nbShards));
}
@ -84,7 +84,10 @@ public class ClusterStateCollectorTests extends ESSingleNodeTestCase {
for (int i = 0; i < nbIndices; i++) {
shardsPerIndex[i] = randomIntBetween(1, 5);
createIndex("test-" + i, Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardsPerIndex[i]).build());
assertAcked(prepareCreate("test-" + i).setSettings(Settings.settingsBuilder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, shardsPerIndex[i])
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build()));
docsPerIndex[i] = randomIntBetween(1, 20);
for (int j = 0; j < docsPerIndex[i]; j++) {
@ -103,25 +106,63 @@ public class ClusterStateCollectorTests extends ESSingleNodeTestCase {
assertThat(marvelDoc, instanceOf(ClusterStateMarvelDoc.class));
ClusterStateMarvelDoc clusterStateMarvelDoc = (ClusterStateMarvelDoc) marvelDoc;
assertThat(clusterStateMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(clusterStateMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(clusterStateMarvelDoc.timestamp(), greaterThan(0L));
assertThat(clusterStateMarvelDoc.type(), equalTo(ClusterStateCollector.TYPE));
assertNotNull(clusterStateMarvelDoc.getClusterState());
ClusterStateMarvelDoc.Payload payload = clusterStateMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getClusterState());
ClusterState clusterState = payload.getClusterState();
ClusterState clusterState = clusterStateMarvelDoc.getClusterState();
for (int i = 0; i < nbIndices; i++) {
assertThat(clusterState.getRoutingTable().allShards("test-" + i), hasSize(shardsPerIndex[i]));
}
}
@Test
public void testClusterStateCollectorWithLicensing() {
String[] nodes = internalCluster().getNodeNames();
for (String node : nodes) {
logger.debug("--> creating a new instance of the collector");
ClusterStateCollector collector = newClusterStateCollector(node);
assertNotNull(collector);
logger.debug("--> enabling license and checks that the collector can collect data if node is master");
enableLicense();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> starting graceful period and checks that the collector can still collect data if node is master");
beginGracefulPeriod();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> ending graceful period and checks that the collector cannot collect data");
endGracefulPeriod();
assertCannotCollect(collector);
logger.debug("--> disabling license and checks that the collector cannot collect data");
disableLicense();
assertCannotCollect(collector);
}
}
private ClusterStateCollector newClusterStateCollector() {
return new ClusterStateCollector(getInstanceFromNode(Settings.class),
getInstanceFromNode(ClusterService.class),
getInstanceFromNode(ClusterName.class),
getInstanceFromNode(MarvelSettingsService.class),
client());
return newClusterStateCollector(null);
}
private ClusterStateCollector newClusterStateCollector(String nodeId) {
if (!Strings.hasText(nodeId)) {
nodeId = randomFrom(internalCluster().getNodeNames());
}
return new ClusterStateCollector(internalCluster().getInstance(Settings.class, nodeId),
internalCluster().getInstance(ClusterService.class, nodeId),
internalCluster().getInstance(MarvelSettings.class, nodeId),
internalCluster().getInstance(LicenseService.class, nodeId),
client(nodeId));
}
}

View File

@ -5,19 +5,20 @@
*/
package org.elasticsearch.marvel.agent.collector.cluster;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.marvel.agent.collector.AbstractCollectorTestCase;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.junit.Test;
import java.util.Collection;
import static org.hamcrest.Matchers.*;
public class ClusterStatsCollectorTests extends ESIntegTestCase {
public class ClusterStatsCollectorTests extends AbstractCollectorTestCase {
@Test
public void testClusterStatsCollector() throws Exception {
@ -29,21 +30,60 @@ public class ClusterStatsCollectorTests extends ESIntegTestCase {
assertThat(marvelDoc, instanceOf(ClusterStatsMarvelDoc.class));
ClusterStatsMarvelDoc clusterStatsMarvelDoc = (ClusterStatsMarvelDoc) marvelDoc;
assertThat(clusterStatsMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(clusterStatsMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(clusterStatsMarvelDoc.timestamp(), greaterThan(0L));
assertThat(clusterStatsMarvelDoc.type(), equalTo(ClusterStatsCollector.TYPE));
ClusterStatsMarvelDoc.Payload payload = clusterStatsMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getClusterStats());
assertThat(payload.getClusterStats().getNodesStats().getCounts().getTotal(), equalTo(internalCluster().getNodeNames().length));
assertNotNull(clusterStatsMarvelDoc.getClusterStats());
assertThat(clusterStatsMarvelDoc.getClusterStats().getNodesStats().getCounts().getTotal(), equalTo(internalCluster().getNodeNames().length));
}
@Test
public void testClusterStatsCollectorWithLicensing() {
String[] nodes = internalCluster().getNodeNames();
for (String node : nodes) {
logger.debug("--> creating a new instance of the collector");
ClusterStatsCollector collector = newClusterStatsCollector(node);
assertNotNull(collector);
logger.debug("--> enabling license and checks that the collector can collect data if node is master");
enableLicense();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> starting graceful period and checks that the collector can still collect data if node is master");
beginGracefulPeriod();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> ending graceful period and checks that the collector cannot collect data");
endGracefulPeriod();
assertCannotCollect(collector);
logger.debug("--> disabling license and checks that the collector cannot collect data");
disableLicense();
assertCannotCollect(collector);
}
}
private ClusterStatsCollector newClusterStatsCollector() {
return new ClusterStatsCollector(internalCluster().getInstance(Settings.class),
internalCluster().getInstance(ClusterService.class),
internalCluster().getInstance(ClusterName.class),
internalCluster().getInstance(MarvelSettingsService.class),
client());
return newClusterStatsCollector(null);
}
private ClusterStatsCollector newClusterStatsCollector(String nodeId) {
if (!Strings.hasText(nodeId)) {
nodeId = randomFrom(internalCluster().getNodeNames());
}
return new ClusterStatsCollector(internalCluster().getInstance(Settings.class, nodeId),
internalCluster().getInstance(ClusterService.class, nodeId),
internalCluster().getInstance(MarvelSettings.class, nodeId),
internalCluster().getInstance(LicenseService.class, nodeId),
client(nodeId));
}
}

View File

@ -7,21 +7,20 @@ package org.elasticsearch.marvel.agent.collector.indices;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.marvel.agent.collector.AbstractCollectorTestCase;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@ -31,34 +30,35 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC
import static org.hamcrest.Matchers.*;
@ESIntegTestCase.ClusterScope(numDataNodes = 0)
public class IndexRecoveryCollectorTests extends ESIntegTestCase {
public class IndexRecoveryCollectorTests extends AbstractCollectorTestCase {
private boolean activeOnly = false;
private final boolean activeOnly = false;
private final String indexName = "test";
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return settingsBuilder()
.put(super.nodeSettings(nodeOrdinal))
.put(MarvelSettingsService.INDEX_RECOVERY_ACTIVE_ONLY, activeOnly)
.put(MarvelSettings.INDEX_RECOVERY_ACTIVE_ONLY, activeOnly)
.put(MarvelSettings.INDICES, indexName)
.build();
}
@Test
public void testIndexRecoveryCollector() throws Exception {
final String indexName = "test";
logger.info("--> start first node");
final String node1 = internalCluster().startNode();
waitForNoBlocksOnNode(node1);
logger.info("--> collect index recovery data");
Collection<MarvelDoc> results = newIndexRecoveryCollector().doCollect();
Collection<MarvelDoc> results = newIndexRecoveryCollector(node1).doCollect();
logger.info("--> no indices created, expecting 0 marvel documents");
assertNotNull(results);
assertThat(results, is(empty()));
logger.info("--> create index on node: {}", node1);
logger.info("--> create index [{}] on node [{}]", indexName, node1);
assertAcked(prepareCreate(indexName, 1, settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 3).put(SETTING_NUMBER_OF_REPLICAS, 1)));
logger.info("--> indexing sample data");
@ -66,20 +66,25 @@ public class IndexRecoveryCollectorTests extends ESIntegTestCase {
for (int i = 0; i < numDocs; i++) {
client().prepareIndex(indexName, "foo").setSource("value", randomInt()).get();
}
flushAndRefresh(indexName);
logger.info("--> create a second index [other] that won't be part of stats collection", indexName, node1);
client().prepareIndex("other", "bar").setSource("value", randomInt()).get();
flushAndRefresh();
assertHitCount(client().prepareCount(indexName).get(), numDocs);
assertHitCount(client().prepareCount("other").get(), 1L);
logger.info("--> start second node");
final String node2 = internalCluster().startNode();
waitForNoBlocksOnNode(node2);
waitForRelocation();
for (MarvelSettingsService marvelSettingsService : internalCluster().getInstances(MarvelSettingsService.class)) {
assertThat(marvelSettingsService.recoveryActiveOnly(), equalTo(activeOnly));
for (MarvelSettings marvelSettings : internalCluster().getInstances(MarvelSettings.class)) {
assertThat(marvelSettings.recoveryActiveOnly(), equalTo(activeOnly));
}
logger.info("--> collect index recovery data");
results = newIndexRecoveryCollector().doCollect();
results = newIndexRecoveryCollector(null).doCollect();
logger.info("--> we should have at least 1 shard in relocation state");
assertNotNull(results);
@ -90,14 +95,11 @@ public class IndexRecoveryCollectorTests extends ESIntegTestCase {
assertThat(marvelDoc, instanceOf(IndexRecoveryMarvelDoc.class));
IndexRecoveryMarvelDoc indexRecoveryMarvelDoc = (IndexRecoveryMarvelDoc) marvelDoc;
assertThat(indexRecoveryMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(indexRecoveryMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(indexRecoveryMarvelDoc.timestamp(), greaterThan(0L));
assertThat(indexRecoveryMarvelDoc.type(), equalTo(IndexRecoveryCollector.TYPE));
IndexRecoveryMarvelDoc.Payload payload = indexRecoveryMarvelDoc.payload();
assertNotNull(payload);
RecoveryResponse recovery = payload.getRecoveryResponse();
RecoveryResponse recovery = indexRecoveryMarvelDoc.getRecoveryResponse();
assertNotNull(recovery);
Map<String, List<ShardRecoveryResponse>> shards = recovery.shardResponses();
@ -109,28 +111,54 @@ public class IndexRecoveryCollectorTests extends ESIntegTestCase {
assertThat(shardRecoveries.size(), greaterThan(0));
for (ShardRecoveryResponse shardRecovery : shardRecoveries) {
assertThat(shardRecovery.getIndex(), equalTo(indexName));
assertThat(shardRecovery.recoveryState().getType(), anyOf(equalTo(RecoveryState.Type.RELOCATION), equalTo(RecoveryState.Type.STORE), equalTo(RecoveryState.Type.REPLICA)));
}
}
}
private IndexRecoveryCollector newIndexRecoveryCollector() {
return new IndexRecoveryCollector(internalCluster().getInstance(Settings.class),
internalCluster().getInstance(ClusterService.class),
internalCluster().getInstance(ClusterName.class),
internalCluster().getInstance(MarvelSettingsService.class),
client());
@Test
public void testIndexRecoveryCollectorWithLicensing() {
String[] nodes = internalCluster().getNodeNames();
for (String node : nodes) {
logger.debug("--> creating a new instance of the collector");
IndexRecoveryCollector collector = newIndexRecoveryCollector(node);
assertNotNull(collector);
logger.debug("--> enabling license and checks that the collector can collect data if node is master");
enableLicense();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> starting graceful period and checks that the collector can still collect data if node is master");
beginGracefulPeriod();
if (node.equals(internalCluster().getMasterName())) {
assertCanCollect(collector);
} else {
assertCannotCollect(collector);
}
logger.debug("--> ending graceful period and checks that the collector cannot collect data");
endGracefulPeriod();
assertCannotCollect(collector);
logger.debug("--> disabling license and checks that the collector cannot collect data");
disableLicense();
assertCannotCollect(collector);
}
}
public void waitForNoBlocksOnNode(final String nodeId) throws Exception {
assertBusy(new Callable<Void>() {
@Override
public Void call() throws Exception {
ClusterBlocks clusterBlocks = client(nodeId).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState().blocks();
assertTrue(clusterBlocks.global().isEmpty());
assertTrue(clusterBlocks.indices().values().isEmpty());
return null;
}
}, 30L, TimeUnit.SECONDS);
private IndexRecoveryCollector newIndexRecoveryCollector(String nodeId) {
if (!Strings.hasText(nodeId)) {
nodeId = randomFrom(internalCluster().getNodeNames());
}
return new IndexRecoveryCollector(internalCluster().getInstance(Settings.class, nodeId),
internalCluster().getInstance(ClusterService.class, nodeId),
internalCluster().getInstance(MarvelSettings.class, nodeId),
internalCluster().getInstance(LicenseService.class, nodeId),
client(nodeId));
}
}

View File

@ -5,15 +5,13 @@
*/
package org.elasticsearch.marvel.agent.collector.indices;
import com.google.common.collect.ImmutableSet;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.marvel.agent.collector.AbstractCollectorTestCase;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.junit.Test;
import java.util.Collection;
@ -22,11 +20,11 @@ import java.util.Iterator;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.*;
public class IndexStatsCollectorTests extends ESSingleNodeTestCase {
public class IndexStatsCollectorTests extends AbstractCollectorTestCase {
@Test
public void testIndexStatsCollectorNoIndices() throws Exception {
waitForNoBlocksOnNode();
waitForNoBlocksOnNodes();
Collection<MarvelDoc> results = newIndexStatsCollector().doCollect();
assertThat(results, is(empty()));
@ -34,15 +32,19 @@ public class IndexStatsCollectorTests extends ESSingleNodeTestCase {
@Test
public void testIndexStatsCollectorOneIndex() throws Exception {
waitForNoBlocksOnNode();
waitForNoBlocksOnNodes();
int nbDocs = randomIntBetween(1, 20);
final String indexName = "test_" + randomInt();
final int nbDocs = randomIntBetween(1, 20);
for (int i = 0; i < nbDocs; i++) {
client().prepareIndex("test", "test").setSource("num", i).get();
client().prepareIndex(indexName, "test").setSource("num", i).get();
}
client().admin().indices().prepareRefresh().get();
assertHitCount(client().prepareCount().get(), nbDocs);
waitForRelocation();
Collection<MarvelDoc> results = newIndexStatsCollector().doCollect();
assertThat(results, hasSize(1));
@ -51,47 +53,50 @@ public class IndexStatsCollectorTests extends ESSingleNodeTestCase {
assertThat(marvelDoc, instanceOf(IndexStatsMarvelDoc.class));
IndexStatsMarvelDoc indexStatsMarvelDoc = (IndexStatsMarvelDoc) marvelDoc;
assertThat(indexStatsMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(indexStatsMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(indexStatsMarvelDoc.timestamp(), greaterThan(0L));
assertThat(indexStatsMarvelDoc.type(), equalTo(IndexStatsCollector.TYPE));
IndexStatsMarvelDoc.Payload payload = indexStatsMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getIndexStats());
IndexStats indexStats = indexStatsMarvelDoc.getIndexStats();
assertNotNull(indexStats);
assertThat(payload.getIndexStats().getIndex(), equalTo("test"));
assertThat(payload.getIndexStats().getTotal().getDocs().getCount(), equalTo((long) nbDocs));
assertNotNull(payload.getIndexStats().getTotal().getStore());
assertThat(payload.getIndexStats().getTotal().getStore().getSizeInBytes(), greaterThan(0L));
assertThat(payload.getIndexStats().getTotal().getStore().getThrottleTime().millis(), equalTo(0L));
assertNotNull(payload.getIndexStats().getTotal().getIndexing());
assertThat(payload.getIndexStats().getTotal().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0L));
assertThat(indexStats.getIndex(), equalTo(indexName));
assertThat(indexStats.getPrimaries().getDocs().getCount(), equalTo((long) nbDocs));
assertNotNull(indexStats.getTotal().getStore());
assertThat(indexStats.getTotal().getStore().getSizeInBytes(), greaterThan(0L));
assertThat(indexStats.getTotal().getStore().getThrottleTime().millis(), equalTo(0L));
assertNotNull(indexStats.getTotal().getIndexing());
assertThat(indexStats.getTotal().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0L));
}
@Test
public void testIndexStatsCollectorMultipleIndices() throws Exception {
waitForNoBlocksOnNode();
waitForNoBlocksOnNodes();
final String indexPrefix = "test_" + randomInt() + "_";
int nbIndices = randomIntBetween(1, 5);
int[] docsPerIndex = new int[nbIndices];
for (int i = 0; i < nbIndices; i++) {
docsPerIndex[i] = randomIntBetween(1, 20);
for (int j = 0; j < docsPerIndex[i]; j++) {
client().prepareIndex("test-" + i, "test").setSource("num", i).get();
client().prepareIndex(indexPrefix + i, "test").setSource("num", i).get();
}
}
String clusterName = client().admin().cluster().prepareHealth().get().getClusterName();
String clusterUUID = client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID();
client().admin().indices().prepareRefresh().get();
for (int i = 0; i < nbIndices; i++) {
assertHitCount(client().prepareCount("test-" + i).get(), docsPerIndex[i]);
assertHitCount(client().prepareCount(indexPrefix + i).get(), docsPerIndex[i]);
}
waitForRelocation();
Collection<MarvelDoc> results = newIndexStatsCollector().doCollect();
assertThat(results, hasSize(nbIndices));
for (int i = 0; i < nbIndices; i++) {
String indexName = indexPrefix + i;
boolean found = false;
Iterator<MarvelDoc> it = results.iterator();
@ -100,46 +105,35 @@ public class IndexStatsCollectorTests extends ESSingleNodeTestCase {
assertThat(marvelDoc, instanceOf(IndexStatsMarvelDoc.class));
IndexStatsMarvelDoc indexStatsMarvelDoc = (IndexStatsMarvelDoc) marvelDoc;
IndexStats indexStats = indexStatsMarvelDoc.getIndexStats();
assertNotNull(indexStats);
IndexStatsMarvelDoc.Payload payload = indexStatsMarvelDoc.payload();
assertNotNull(payload);
assertNotNull(payload.getIndexStats());
if (payload.getIndexStats().getIndex().equals("test-" + i)) {
assertThat(indexStatsMarvelDoc.clusterName(), equalTo(clusterName));
if (indexStats.getIndex().equals(indexPrefix + i)) {
assertThat(indexStatsMarvelDoc.clusterUUID(), equalTo(clusterUUID));
assertThat(indexStatsMarvelDoc.timestamp(), greaterThan(0L));
assertThat(indexStatsMarvelDoc.type(), equalTo(IndexStatsCollector.TYPE));
assertNotNull(payload.getIndexStats().getTotal().getDocs());
assertThat(payload.getIndexStats().getTotal().getDocs().getCount(), equalTo((long) docsPerIndex[i]));
assertNotNull(payload.getIndexStats().getTotal().getStore());
assertThat(payload.getIndexStats().getTotal().getStore().getSizeInBytes(), greaterThan(0L));
assertThat(payload.getIndexStats().getTotal().getStore().getThrottleTime().millis(), equalTo(0L));
assertNotNull(payload.getIndexStats().getTotal().getIndexing());
assertThat(payload.getIndexStats().getTotal().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0L));
assertThat(indexStats.getIndex(), equalTo(indexName));
assertNotNull(indexStats.getTotal().getDocs());
assertThat(indexStats.getPrimaries().getDocs().getCount(), equalTo((long) docsPerIndex[i]));
assertNotNull(indexStats.getTotal().getStore());
assertThat(indexStats.getTotal().getStore().getSizeInBytes(), greaterThan(0L));
assertThat(indexStats.getTotal().getStore().getThrottleTime().millis(), equalTo(0L));
assertNotNull(indexStats.getTotal().getIndexing());
assertThat(indexStats.getTotal().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0L));
found = true;
}
}
assertThat("could not find collected stats for index [test-" + i + "]", found, is(true));
assertThat("could not find collected stats for index [" + indexPrefix + i + "]", found, is(true));
}
}
private IndexStatsCollector newIndexStatsCollector() {
return new IndexStatsCollector(getInstanceFromNode(Settings.class),
getInstanceFromNode(ClusterService.class),
getInstanceFromNode(ClusterName.class),
getInstanceFromNode(MarvelSettingsService.class),
client());
}
public void waitForNoBlocksOnNode() throws InterruptedException {
final long start = System.currentTimeMillis();
final TimeValue timeout = TimeValue.timeValueSeconds(30);
ImmutableSet<ClusterBlock> blocks;
do {
blocks = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState().blocks().global();
}
while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis());
assertTrue(blocks.isEmpty());
String nodeId = randomFrom(internalCluster().getNodeNames());
return new IndexStatsCollector(internalCluster().getInstance(Settings.class, nodeId),
internalCluster().getInstance(ClusterService.class, nodeId),
internalCluster().getInstance(MarvelSettings.class, nodeId),
internalCluster().getInstance(LicenseService.class, nodeId),
client(nodeId));
}
}

View File

@ -5,24 +5,24 @@
*/
package org.elasticsearch.marvel.agent.collector.node;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.marvel.agent.collector.AbstractCollectorTestCase;
import org.elasticsearch.marvel.agent.exporter.MarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettingsService;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.marvel.license.LicenseService;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;
import java.util.Collection;
import static org.hamcrest.Matchers.*;
public class NodeStatsCollectorTests extends ESIntegTestCase {
public class NodeStatsCollectorTests extends AbstractCollectorTestCase {
@Test
public void testNodeStatsCollector() throws Exception {
@ -37,27 +37,51 @@ public class NodeStatsCollectorTests extends ESIntegTestCase {
assertThat(marvelDoc, instanceOf(NodeStatsMarvelDoc.class));
NodeStatsMarvelDoc nodeStatsMarvelDoc = (NodeStatsMarvelDoc) marvelDoc;
assertThat(nodeStatsMarvelDoc.clusterName(), equalTo(client().admin().cluster().prepareHealth().get().getClusterName()));
assertThat(nodeStatsMarvelDoc.clusterUUID(), equalTo(client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID()));
assertThat(nodeStatsMarvelDoc.timestamp(), greaterThan(0L));
assertThat(nodeStatsMarvelDoc.type(), equalTo(NodeStatsCollector.TYPE));
NodeStatsMarvelDoc.Payload payload = nodeStatsMarvelDoc.payload();
assertNotNull(payload);
assertThat(payload.getNodeId(), equalTo(internalCluster().getInstance(DiscoveryService.class, node).localNode().id()));
assertThat(payload.isNodeMaster(), equalTo(node.equals(internalCluster().getMasterName())));
assertThat(payload.isMlockall(), equalTo(Bootstrap.isMemoryLocked()));
assertNotNull(payload.isDiskThresholdDeciderEnabled());
assertNotNull(payload.getDiskThresholdWaterMarkHigh());
assertThat(nodeStatsMarvelDoc.getNodeId(), equalTo(internalCluster().getInstance(DiscoveryService.class, node).localNode().id()));
assertThat(nodeStatsMarvelDoc.isNodeMaster(), equalTo(node.equals(internalCluster().getMasterName())));
assertThat(nodeStatsMarvelDoc.isMlockall(), equalTo(BootstrapInfo.isMemoryLocked()));
assertNotNull(nodeStatsMarvelDoc.isDiskThresholdDeciderEnabled());
assertNotNull(nodeStatsMarvelDoc.getDiskThresholdWaterMarkHigh());
assertNotNull(payload.getNodeStats());
assertNotNull(nodeStatsMarvelDoc.getNodeStats());
}
}
@Test
public void testNodeStatsCollectorWithLicensing() {
String[] nodes = internalCluster().getNodeNames();
for (String node : nodes) {
logger.debug("--> creating a new instance of the collector");
NodeStatsCollector collector = newNodeStatsCollector(node);
assertNotNull(collector);
logger.debug("--> enabling license and checks that the collector can collect data");
enableLicense();
assertCanCollect(collector);
logger.debug("--> starting graceful period and checks that the collector can still collect data");
beginGracefulPeriod();
assertCanCollect(collector);
logger.debug("--> ending graceful period and checks that the collector cannot collect data");
endGracefulPeriod();
assertCannotCollect(collector);
logger.debug("--> disabling license and checks that the collector cannot collect data");
disableLicense();
assertCannotCollect(collector);
}
}
private NodeStatsCollector newNodeStatsCollector(final String nodeId) {
return new NodeStatsCollector(internalCluster().getInstance(Settings.class, nodeId),
internalCluster().getInstance(ClusterService.class, nodeId),
internalCluster().getInstance(ClusterName.class, nodeId),
internalCluster().getInstance(MarvelSettingsService.class, nodeId),
internalCluster().getInstance(MarvelSettings.class, nodeId),
internalCluster().getInstance(LicenseService.class, nodeId),
internalCluster().getInstance(NodeService.class, nodeId),
internalCluster().getInstance(DiscoveryService.class, nodeId),
new Provider<DiskThresholdDecider>() {

View File

@ -16,6 +16,7 @@ import org.elasticsearch.license.plugin.LicensePlugin;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.AgentService;
import org.elasticsearch.marvel.agent.collector.indices.IndexStatsMarvelDoc;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@ -32,6 +33,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
// Transport Client instantiation also calls the marvel plugin, which then fails to find modules
@ClusterScope(transportClientRatio = 0.0, scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
@ESIntegTestCase.SuppressLocalMode
public class HttpESExporterTests extends ESIntegTestCase {
final static AtomicLong timeStampGenerator = new AtomicLong();
@ -47,7 +49,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
@Test
public void testHttpServerOff() {
Settings.Builder builder = Settings.builder()
.put(AgentService.SETTINGS_INTERVAL, "200m")
.put(MarvelSettings.STARTUP_DELAY, "200m")
.put(Node.HTTP_ENABLED, false);
internalCluster().startNode(builder);
HttpESExporter httpEsExporter = getEsExporter();
@ -85,7 +87,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
@Test
public void testTemplateAdditionDespiteOfLateClusterForming() {
Settings.Builder builder = Settings.builder()
.put(AgentService.SETTINGS_INTERVAL, "200m")
.put(MarvelSettings.STARTUP_DELAY, "200m")
.put(Node.HTTP_ENABLED, true)
.put("discovery.type", "zen")
.put("discovery.zen.ping_timeout", "1s")
@ -113,7 +115,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
public void testDynamicHostChange() {
// disable exporting to be able to use non valid hosts
Settings.Builder builder = Settings.builder()
.put(AgentService.SETTINGS_INTERVAL, "-1");
.put(MarvelSettings.INTERVAL, "-1");
internalCluster().startNode(builder);
HttpESExporter httpEsExporter = getEsExporter();
@ -133,7 +135,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
@Test
public void testHostChangeReChecksTemplate() {
Settings.Builder builder = Settings.builder()
.put(AgentService.SETTINGS_INTERVAL, "200m")
.put(MarvelSettings.STARTUP_DELAY, "200m")
.put(Node.HTTP_ENABLED, true);
internalCluster().startNode(builder);
@ -159,7 +161,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
@Test
public void testHostFailureChecksTemplate() throws InterruptedException, IOException {
Settings.Builder builder = Settings.builder()
.put(AgentService.SETTINGS_INTERVAL, "200m")
.put(MarvelSettings.STARTUP_DELAY, "200m")
.put(Node.HTTP_ENABLED, true);
final String node0 = internalCluster().startNode(builder);
String node1 = internalCluster().startNode(builder);
@ -219,7 +221,7 @@ public class HttpESExporterTests extends ESIntegTestCase {
}
private MarvelDoc newRandomMarvelDoc() {
return IndexStatsMarvelDoc.createMarvelDoc(internalCluster().getClusterName(), "test_marvelDoc", timeStampGenerator.incrementAndGet(),
return new IndexStatsMarvelDoc(internalCluster().getClusterName(), "test_marvelDoc", timeStampGenerator.incrementAndGet(),
new IndexStats("test_index", null));
}

View File

@ -69,8 +69,6 @@ public class RendererTestUtils {
}
break;
case VALUE_NUMBER:
assertThat("numeric type for property '" + resultParser.currentName() + "' must be identical",
resultParser.numberType(), Matchers.equalTo(expectedParser.numberType()));
if (verifyValues) {
assertThat("numeric value for property '" + resultParser.currentName() + "' must be identical",
resultParser.numberValue(), Matchers.equalTo(expectedParser.numberValue()));

View File

@ -36,7 +36,7 @@ public class ClusterStateRendererTests extends ESSingleNodeTestCase {
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().get();
logger.debug("--> creating the cluster state marvel document");
ClusterStateMarvelDoc marvelDoc = ClusterStateMarvelDoc.createMarvelDoc("test", "marvel_cluster_state", 1437580442979L,
ClusterStateMarvelDoc marvelDoc = new ClusterStateMarvelDoc("test", "marvel_cluster_state", 1437580442979L,
clusterState, clusterHealth.getStatus());
logger.debug("--> rendering the document");

View File

@ -25,7 +25,7 @@ public class ClusterStatsRendererTests extends ESSingleNodeTestCase {
ClusterStatsResponse clusterStats = client().admin().cluster().prepareClusterStats().get();
logger.debug("--> creating the cluster stats marvel document");
ClusterStatsMarvelDoc marvelDoc = ClusterStatsMarvelDoc.createMarvelDoc("test", "marvel_cluster_stats", 1437580442979L, clusterStats);
ClusterStatsMarvelDoc marvelDoc = new ClusterStatsMarvelDoc("test", "marvel_cluster_stats", 1437580442979L, clusterStats);
logger.debug("--> rendering the document");
Renderer renderer = new ClusterStatsRenderer();

View File

@ -53,7 +53,7 @@ public class IndexRecoveryRendererTests extends ESTestCase {
RecoveryResponse recoveryResponse = new RecoveryResponse(2, 2, 2, false, shardResponses, null);
IndexRecoveryMarvelDoc marvelDoc = IndexRecoveryMarvelDoc.createMarvelDoc("test", "marvel_index_recovery", 1437580442979L, recoveryResponse);
IndexRecoveryMarvelDoc marvelDoc = new IndexRecoveryMarvelDoc("test", "marvel_index_recovery", 1437580442979L, recoveryResponse);
logger.debug("--> rendering the document");
Renderer renderer = new IndexRecoveryRenderer();

View File

@ -0,0 +1,116 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.renderer.indices;
import org.elasticsearch.action.count.CountResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.plugin.LicensePlugin;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.collector.indices.IndexStatsCollector;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.junit.Test;
import java.util.Map;
import static org.hamcrest.Matchers.*;
@ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1)
public class IndexStatsRendererIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", MarvelPlugin.class.getName() + "," + LicensePlugin.class.getName())
.put(Node.HTTP_ENABLED, true)
.put(MarvelSettings.STARTUP_DELAY, "1s")
.put(MarvelSettings.INTERVAL, "30s")
.put(MarvelSettings.COLLECTORS, IndexStatsCollector.NAME)
.build();
}
@Test
public void testIndexStats() throws Exception {
final int nbIndices = randomIntBetween(1, 5);
for (int i = 0; i < nbIndices; i++) {
createIndex("test" + i);
}
final long[] nbDocsPerIndex = new long[nbIndices];
for (int i = 0; i < nbIndices; i++) {
nbDocsPerIndex[i] = randomIntBetween(1, 50);
for (int j = 0; j < nbDocsPerIndex[i]; j++) {
client().prepareIndex("test" + i, "type1").setSource("num", i).get();
}
}
refresh();
logger.debug("--> wait for index stats collector to collect stat for each index");
assertBusy(new Runnable() {
@Override
public void run() {
for (int i = 0; i < nbIndices; i++) {
CountResponse count = client().prepareCount()
.setTypes(IndexStatsCollector.TYPE)
.setQuery(QueryBuilders.termQuery("index_stats.index", "test" + i))
.get();
assertThat(count.getCount(), greaterThan(0L));
}
}
});
logger.debug("--> get the list of filters used by the renderer");
String[] filters = IndexStatsRenderer.FILTERS;
logger.debug("--> checking that every document contains the expected fields");
SearchResponse response = client().prepareSearch().setTypes(IndexStatsCollector.TYPE).get();
for (SearchHit searchHit : response.getHits().getHits()) {
Map<String, Object> fields = searchHit.sourceAsMap();
for (String filter : filters) {
assertContains(filter, fields);
}
}
}
/**
 * Checks if a field exists in a map of values. If the field contains a dot like 'foo.bar',
 * it checks that 'foo' exists in the map of values and that it points to a sub-map, then
 * recurses to check that 'bar' exists in the sub-map.
 */
private void assertContains(String field, Map<String, Object> values) {
assertNotNull(field);
assertNotNull(values);
int point = field.indexOf('.');
if (point > -1) {
assertThat(point, allOf(greaterThan(0), lessThan(field.length())));
String segment = field.substring(0, point);
assertTrue(Strings.hasText(segment));
Object value = values.get(segment);
assertNotNull(value);
String next = field.substring(point + 1);
if (next.length() > 0) {
assertTrue(value instanceof Map);
assertContains(next, (Map<String, Object>) value);
} else {
assertFalse(value instanceof Map);
}
} else {
assertNotNull(values.get(field));
}
}
}
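The assertContains helper above resolves a dotted filter name such as "index_stats.index" one segment at a time against nested source maps. A small self-contained sketch of that lookup, with made-up map contents for illustration (it is not part of the test class):

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the dotted-field lookup described in the comment above.
// The nested map mimics a _source like {"index_stats": {"index": "test0"}}.
public class DottedFieldLookupSketch {

    static Object resolve(String field, Map<String, Object> values) {
        int dot = field.indexOf('.');
        if (dot < 0) {
            return values.get(field);                      // plain field: direct lookup
        }
        Object sub = values.get(field.substring(0, dot));  // 'foo' must point to a sub-map
        if (!(sub instanceof Map)) {
            return null;
        }
        @SuppressWarnings("unchecked")
        Map<String, Object> subMap = (Map<String, Object>) sub;
        return resolve(field.substring(dot + 1), subMap);  // recurse on 'bar'
    }

    public static void main(String[] args) {
        Map<String, Object> inner = new HashMap<>();
        inner.put("index", "test0");
        Map<String, Object> source = new HashMap<>();
        source.put("index_stats", inner);

        System.out.println(resolve("index_stats.index", source));   // prints: test0
        System.out.println(resolve("index_stats.missing", source)); // prints: null
    }
}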

View File

@ -8,7 +8,11 @@ package org.elasticsearch.marvel.agent.renderer.indices;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.marvel.agent.collector.indices.IndexStatsMarvelDoc;
@ -25,14 +29,20 @@ public class IndexStatsRendererTests extends ESTestCase {
@Test
public void testIndexStatsRenderer() throws Exception {
logger.debug("--> creating the index stats marvel document");
IndexStatsMarvelDoc marvelDoc = IndexStatsMarvelDoc.createMarvelDoc("test", "marvel_index_stats", 1437580442979L,
IndexStatsMarvelDoc marvelDoc = new IndexStatsMarvelDoc("test", "marvel_index_stats", 1437580442979L,
new IndexStats("index-0", new ShardStats[0]) {
@Override
public CommonStats getTotal() {
CommonStats stats = new CommonStats();
stats.docs = new DocsStats(345678L, 123L);
stats.store = new StoreStats(5761573L, 0L);
stats.indexing = new IndexingStats(new IndexingStats.Stats(0L, 0L, 0L, 0L, 0L, 0L, 0L, true, 302L), null);
stats.indexing = new IndexingStats(new IndexingStats.Stats(3L, 71L, 0L, 0L, 0L, 0L, 0L, true, 302L), null);
stats.search = new SearchStats(new SearchStats.Stats(1L, 7L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), 0L, null);
stats.merge = new MergeStats();
stats.merge.add(0L, 0L, 0L, 42L, 0L, 0L, 0L, 0L, 0L, 0L);
stats.refresh = new RefreshStats(0L, 978L);
stats.segments = new SegmentsStats();
stats.segments.add(0, 87965412L);
return stats;
}
@ -40,7 +50,7 @@ public class IndexStatsRendererTests extends ESTestCase {
public CommonStats getPrimaries() {
// Primaries will be filtered out by the renderer
CommonStats stats = new CommonStats();
stats.docs = new DocsStats(randomLong(), randomLong());
stats.docs = new DocsStats(345678L, randomLong());
stats.store = new StoreStats(randomLong(), randomLong());
stats.indexing = new IndexingStats(new IndexingStats.Stats(0L, 0L, 0L, 0L, 0L, 0L, 0L, true, randomLong()), null);
return stats;

View File

@ -0,0 +1,121 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.renderer.licenses;
import org.elasticsearch.Version;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.core.License;
import org.elasticsearch.license.plugin.LicensePlugin;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.agent.collector.licenses.LicensesCollector;
import org.elasticsearch.marvel.agent.settings.MarvelSettings;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.*;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, transportClientRatio = 0.0)
public class LicensesRendererIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", MarvelPlugin.class.getName() + "," + LicensePlugin.class.getName())
.put(Node.HTTP_ENABLED, true)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(MarvelSettings.STARTUP_DELAY, "1s")
.put(MarvelSettings.COLLECTORS, LicensesCollector.NAME)
.build();
}
@Test
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/13017")
public void testLicenses() throws Exception {
final String clusterUUID = client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID();
assertTrue(Strings.hasText(clusterUUID));
logger.debug("--> waiting for licenses collector to collect data (ie, the trial marvel license)");
GetResponse response = assertBusy(new Callable<GetResponse>() {
@Override
public GetResponse call() throws Exception {
// Checks if the marvel data index exists (it should have been created by the LicenseCollector)
assertTrue(MarvelSettings.MARVEL_DATA_INDEX_NAME + " index does not exist", client().admin().indices().prepareExists(MarvelSettings.MARVEL_DATA_INDEX_NAME).get().isExists());
ensureYellow(MarvelSettings.MARVEL_DATA_INDEX_NAME);
GetResponse response = client().prepareGet(MarvelSettings.MARVEL_DATA_INDEX_NAME, LicensesCollector.TYPE, clusterUUID).get();
assertTrue(MarvelSettings.MARVEL_DATA_INDEX_NAME + " document does not exist", response.isExists());
return response;
}
});
logger.debug("--> checking that the document contains license information");
assertThat(response.getIndex(), equalTo(MarvelSettings.MARVEL_DATA_INDEX_NAME));
assertThat(response.getType(), equalTo(LicensesCollector.TYPE));
assertThat(response.getId(), equalTo(clusterUUID));
Map<String, Object> source = response.getSource();
assertThat((String) source.get(LicensesRenderer.Fields.CLUSTER_NAME.underscore().toString()), equalTo(cluster().getClusterName()));
assertThat((String) source.get(LicensesRenderer.Fields.VERSION.underscore().toString()), equalTo(Version.CURRENT.toString()));
Object licensesList = source.get(LicensesRenderer.Fields.LICENSES.underscore().toString());
assertThat(licensesList, instanceOf(List.class));
List licenses = (List) licensesList;
assertThat(licenses.size(), equalTo(1));
Map license = (Map) licenses.iterator().next();
assertThat(license, instanceOf(Map.class));
String uid = (String) ((Map) license).get(LicensesRenderer.Fields.UID.underscore().toString());
assertThat(uid, not(isEmptyOrNullString()));
String type = (String) ((Map) license).get(LicensesRenderer.Fields.TYPE.underscore().toString());
assertThat(type, not(isEmptyOrNullString()));
String status = (String) ((Map) license).get(LicensesRenderer.Fields.STATUS.underscore().toString());
assertThat(status, not(isEmptyOrNullString()));
Long expiryDate = (Long) ((Map) license).get(LicensesRenderer.Fields.EXPIRY_DATE_IN_MILLIS.underscore().toString());
assertThat(expiryDate, greaterThan(0L));
// We basically recompute the hash here
String hkey = (String) ((Map) license).get(LicensesRenderer.Fields.HKEY.underscore().toString());
String recalculated = LicensesRenderer.hash(status, uid, type, String.valueOf(expiryDate), clusterUUID);
assertThat(hkey, equalTo(recalculated));
assertThat((String) ((Map) license).get(LicensesRenderer.Fields.FEATURE.underscore().toString()), not(isEmptyOrNullString()));
assertThat((String) ((Map) license).get(LicensesRenderer.Fields.ISSUER.underscore().toString()), not(isEmptyOrNullString()));
assertThat((String) ((Map) license).get(LicensesRenderer.Fields.ISSUED_TO.underscore().toString()), not(isEmptyOrNullString()));
assertThat((Long) ((Map) license).get(LicensesRenderer.Fields.ISSUE_DATE_IN_MILLIS.underscore().toString()), greaterThan(0L));
assertThat((Integer) ((Map) license).get(LicensesRenderer.Fields.MAX_NODES.underscore().toString()), greaterThan(0));
logger.debug("--> check that the cluster_licenses is not indexed");
refresh();
assertHitCount(client().prepareCount()
.setIndices(MarvelSettings.MARVEL_DATA_INDEX_NAME)
.setTypes(LicensesCollector.TYPE)
.setQuery(QueryBuilders.boolQuery()
.should(QueryBuilders.matchQuery(LicensesRenderer.Fields.STATUS.underscore().toString(), License.Status.ACTIVE.label()))
.should(QueryBuilders.matchQuery(LicensesRenderer.Fields.STATUS.underscore().toString(), License.Status.INVALID.label()))
.should(QueryBuilders.matchQuery(LicensesRenderer.Fields.STATUS.underscore().toString(), License.Status.EXPIRED.label()))
.minimumNumberShouldMatch(1)
).get(), 0L);
}
}
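The comment "We basically recompute the hash here" refers to re-deriving the stored hkey from the status, uid, type, expiry date and cluster UUID that were just read back from the document. The actual derivation lives in LicensesRenderer.hash(...), which is not included in this diff, so the digest and encoding in the sketch below are assumptions, shown only to illustrate the shape of such a computation:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

// Illustration only: one plausible way to derive a stable content hash from the license
// fields. The real algorithm is LicensesRenderer.hash(...), which is not shown in this
// diff, so the SHA-256 digest and Base64 encoding below are assumptions.
public class LicenseHashSketch {

    static String hash(String status, String uid, String type, String expiryDate, String clusterUUID)
            throws NoSuchAlgorithmException {
        String input = status + uid + type + expiryDate + clusterUUID; // same argument order as in the test
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(input.getBytes(StandardCharsets.UTF_8));
        return Base64.getEncoder().encodeToString(digest);
    }
}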

View File

@ -27,7 +27,7 @@ public class NodeStatsRendererTests extends ESSingleNodeTestCase {
NodeStats nodeStats = getInstanceFromNode(NodeService.class).stats();
logger.debug("--> creating the node stats marvel document");
NodeStatsMarvelDoc marvelDoc = NodeStatsMarvelDoc.createMarvelDoc("test", "marvel_node_stats", 1437580442979L,
NodeStatsMarvelDoc marvelDoc = new NodeStatsMarvelDoc("test", "marvel_node_stats", 1437580442979L,
"node-0", true, nodeStats, false, 90.0, true);
logger.debug("--> rendering the document");

View File

@ -23,16 +23,14 @@ public class MarvelSettingTests extends ESTestCase {
if (randomBoolean()) {
defaultValue = randomBoolean();
}
boolean dynamic = randomBoolean();
MarvelSetting.BooleanSetting setting = MarvelSetting.booleanSetting(name, defaultValue, description);
MarvelSetting.BooleanSetting setting = MarvelSetting.booleanSetting(name, defaultValue, description, dynamic);
assertThat(setting.getName(), equalTo(name));
assertThat(setting.getDescription(), equalTo(description));
assertThat(setting.getDefaultValue(), equalTo(defaultValue));
setting.onInit(settingsBuilder().build());
assertThat(setting.getValue(), equalTo(defaultValue));
setting.onInit(settingsBuilder().put(name, Boolean.FALSE).build());
setting.onRefresh(settingsBuilder().put(name, Boolean.FALSE).build());
assertFalse(setting.getValue());
setting.onRefresh(settingsBuilder().put(name, Boolean.TRUE).build());
@ -47,28 +45,22 @@ public class MarvelSettingTests extends ESTestCase {
if (randomBoolean()) {
defaultValue = randomTimeValue();
}
boolean dynamic = randomBoolean();
MarvelSetting.TimeValueSetting setting = MarvelSetting.timeSetting(name, defaultValue, description);
MarvelSetting.TimeValueSetting setting = MarvelSetting.timeSetting(name, defaultValue, description, dynamic);
assertThat(setting.getName(), equalTo(name));
assertThat(setting.getDescription(), equalTo(description));
if (defaultValue == null) {
assertNull(setting.getDefaultValue());
} else {
assertThat(setting.getDefaultValue().millis(), equalTo(defaultValue.millis()));
}
setting.onInit(settingsBuilder().build());
if (defaultValue == null) {
assertNull(setting.getValue());
} else {
assertThat(setting.getValue().millis(), equalTo(defaultValue.millis()));
}
setting.onInit(settingsBuilder().put(name, 15000L).build());
setting.onRefresh(settingsBuilder().put(name, 15000L).build());
assertThat(setting.getValue().millis(), equalTo(15000L));
TimeValue updated = randomTimeValue();
setting.onInit(settingsBuilder().put(name, updated.toString()).build());
setting.onRefresh(settingsBuilder().put(name, updated.toString()).build());
assertThat(setting.getValue().millis(), equalTo(updated.millis()));
updated = randomTimeValue();
@ -84,17 +76,22 @@ public class MarvelSettingTests extends ESTestCase {
if (randomBoolean()) {
defaultValue = randomAsciiOfLength(15);
}
boolean dynamic = randomBoolean();
MarvelSetting.StringSetting setting = MarvelSetting.stringSetting(name, defaultValue, description);
MarvelSetting.StringSetting setting = MarvelSetting.stringSetting(name, defaultValue, description, dynamic);
assertThat(setting.getName(), equalTo(name));
assertThat(setting.getDescription(), equalTo(description));
assertThat(setting.getDefaultValue(), equalTo(defaultValue));
if (defaultValue == null) {
assertNull(setting.getValue());
} else {
assertThat(setting.getValue(), equalTo(defaultValue));
}
setting.onInit(settingsBuilder().build());
setting.onRefresh(settingsBuilder().build());
assertThat(setting.getValue(), equalTo(defaultValue));
String updated = randomAsciiOfLength(15);
setting.onInit(settingsBuilder().put(name, updated).build());
setting.onRefresh(settingsBuilder().put(name, updated).build());
assertThat(setting.getValue(), equalTo(updated));
updated = randomAsciiOfLength(15);
@ -110,25 +107,22 @@ public class MarvelSettingTests extends ESTestCase {
if (randomBoolean()) {
defaultValue = randomStringArray();
}
boolean dynamic = randomBoolean();
MarvelSetting.StringArraySetting setting = MarvelSetting.arraySetting(name, defaultValue, description);
MarvelSetting.StringArraySetting setting = MarvelSetting.arraySetting(name, defaultValue, description, dynamic);
assertThat(setting.getName(), equalTo(name));
assertThat(setting.getDescription(), equalTo(description));
if (defaultValue == null) {
assertNull(setting.getDefaultValue());
} else {
assertArrayEquals(setting.getDefaultValue(), defaultValue);
}
setting.onInit(settingsBuilder().build());
if (defaultValue == null) {
assertArrayEquals(setting.getValue(), Strings.EMPTY_ARRAY);
assertNull(setting.getValue());
} else {
assertArrayEquals(setting.getValue(), defaultValue);
}
setting.onRefresh(settingsBuilder().build());
assertArrayEquals(setting.getValue(), defaultValue);
String[] updated = randomStringArray();
setting.onInit(settingsBuilder().put(name, Strings.arrayToCommaDelimitedString(updated)).build());
setting.onRefresh(settingsBuilder().put(name, Strings.arrayToCommaDelimitedString(updated)).build());
assertArrayEquals(setting.getValue(), updated);
updated = randomStringArray();

View File

@ -1,42 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.settings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.equalTo;
public class MarvelSettingsServiceTests extends ESTestCase {
@Test
public void testMarvelSettingService() {
MarvelSettingsService service = new MarvelSettingsService(Settings.EMPTY);
TimeValue indexStatsTimeout = service.indexStatsTimeout();
assertNotNull(indexStatsTimeout);
String[] indices = service.indices();
assertNotNull(indices);
TimeValue updatedIndexStatsTimeout = TimeValue.timeValueSeconds(60L);
String[] updatedIndices = new String[]{"index-0", "index-1"};
Settings settings = settingsBuilder()
.put(service.indexStatsTimeout.getName(), updatedIndexStatsTimeout)
.put(service.indices.getName(), Strings.arrayToCommaDelimitedString(updatedIndices))
.build();
service.onRefreshSettings(settings);
assertThat(service.indexStatsTimeout(), equalTo(updatedIndexStatsTimeout));
assertArrayEquals(service.indices(), updatedIndices);
}
}

View File

@ -0,0 +1,158 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.agent.settings;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.license.plugin.LicensePlugin;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Test;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.*;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1)
public class MarvelSettingsTests extends ESIntegTestCase {
private final TimeValue startUp = randomTimeValue();
private final TimeValue interval = randomTimeValue();
private final TimeValue indexStatsTimeout = randomTimeValue();
private final String[] indices = randomStringArray();
private final TimeValue clusterStateTimeout = randomTimeValue();
private final TimeValue clusterStatsTimeout = randomTimeValue();
private final TimeValue recoveryTimeout = randomTimeValue();
private final Boolean recoveryActiveOnly = randomBoolean();
private final String[] collectors = randomStringArray();
private final TimeValue licenseGracePeriod = randomExpirationDelay();
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", MarvelPlugin.class.getName() + "," + LicensePlugin.class.getName())
.put(Node.HTTP_ENABLED, true)
.put(marvelSettings())
.build();
}
private Settings marvelSettings() {
return Settings.builder()
.put(MarvelSettings.STARTUP_DELAY, startUp)
.put(MarvelSettings.INTERVAL, interval)
.put(MarvelSettings.INDEX_STATS_TIMEOUT, indexStatsTimeout)
.putArray(MarvelSettings.INDICES, indices)
.put(MarvelSettings.CLUSTER_STATE_TIMEOUT, clusterStateTimeout)
.put(MarvelSettings.CLUSTER_STATS_TIMEOUT, clusterStatsTimeout)
.put(MarvelSettings.INDEX_RECOVERY_TIMEOUT, recoveryTimeout)
.put(MarvelSettings.INDEX_RECOVERY_ACTIVE_ONLY, recoveryActiveOnly)
.putArray(MarvelSettings.COLLECTORS, collectors)
.put(MarvelSettings.LICENSE_GRACE_PERIOD, licenseGracePeriod)
.build();
}
@Test
public void testMarvelSettingService() throws Exception {
logger.info("--> testing marvel settings service initialization");
for (final MarvelSettings marvelSettings : internalCluster().getInstances(MarvelSettings.class)) {
assertThat(marvelSettings.startUpDelay().millis(), equalTo(startUp.millis()));
assertThat(marvelSettings.interval().millis(), equalTo(interval.millis()));
assertThat(marvelSettings.indexStatsTimeout().millis(), equalTo(indexStatsTimeout.millis()));
assertArrayEquals(marvelSettings.indices(), indices);
assertThat(marvelSettings.clusterStateTimeout().millis(), equalTo(clusterStateTimeout.millis()));
assertThat(marvelSettings.clusterStatsTimeout().millis(), equalTo(clusterStatsTimeout.millis()));
assertThat(marvelSettings.recoveryTimeout().millis(), equalTo(recoveryTimeout.millis()));
assertThat(marvelSettings.recoveryActiveOnly(), equalTo(recoveryActiveOnly));
assertArrayEquals(marvelSettings.collectors(), collectors);
assertThat(marvelSettings.licenseExpirationGracePeriod().millis(), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(MarvelSettings.MAX_LICENSE_GRACE_PERIOD.millis())));
for (final MarvelSetting setting : MarvelSettings.dynamicSettings()) {
assertThat(marvelSettings.getSettingValue(setting.getName()), equalTo(setting.getValue()));
}
}
logger.info("--> testing marvel dynamic settings update");
for (final MarvelSetting setting : MarvelSettings.dynamicSettings()) {
Object updated = null;
Settings.Builder transientSettings = Settings.builder();
if (setting instanceof MarvelSetting.TimeValueSetting) {
updated = randomTimeValue();
transientSettings.put(setting.getName(), updated);
} else if (setting instanceof MarvelSetting.BooleanSetting) {
updated = randomBoolean();
transientSettings.put(setting.getName(), updated);
} else if (setting instanceof MarvelSetting.StringSetting) {
updated = randomAsciiOfLength(10);
transientSettings.put(setting.getName(), updated);
} else if (setting instanceof MarvelSetting.StringArraySetting) {
updated = randomStringArray();
transientSettings.putArray(setting.getName(), (String[]) updated);
}
logger.info("--> updating {} to value [{}]", setting, updated);
assertAcked(prepareRandomUpdateSettings(transientSettings.build()).get());
// checking that the value has been correctly updated on all marvel settings services
final Object expected = updated;
assertBusy(new Runnable() {
@Override
public void run() {
for (final MarvelSettings marvelSettings : internalCluster().getInstances(MarvelSettings.class)) {
MarvelSetting current = marvelSettings.getSetting(setting.getName());
Object value = current.getValue();
logger.info("--> {} in {}", current, marvelSettings);
if (setting instanceof MarvelSetting.TimeValueSetting) {
assertThat(((TimeValue) value).millis(), equalTo(((TimeValue) expected).millis()));
} else if (setting instanceof MarvelSetting.BooleanSetting) {
assertThat((Boolean) value, equalTo((Boolean) expected));
} else if (setting instanceof MarvelSetting.StringSetting) {
assertThat((String) value, equalTo((String) expected));
} else if (setting instanceof MarvelSetting.StringArraySetting) {
assertArrayEquals((String[]) value, (String[]) expected);
}
}
}
});
}
}
private ClusterUpdateSettingsRequestBuilder prepareRandomUpdateSettings(Settings updateSettings) {
ClusterUpdateSettingsRequestBuilder requestBuilder = client().admin().cluster().prepareUpdateSettings();
if (randomBoolean()) {
requestBuilder.setTransientSettings(updateSettings);
} else {
requestBuilder.setPersistentSettings(updateSettings);
}
return requestBuilder;
}
private TimeValue randomTimeValue() {
return TimeValue.parseTimeValue(randomFrom("1s", "10s", "30s", "1m", "30m", "1h"), null, getClass().getSimpleName());
}
private String[] randomStringArray() {
final int size = scaledRandomIntBetween(1, 10);
String[] items = new String[size];
for (int i = 0; i < size; i++) {
items[i] = randomAsciiOfLength(5);
}
return items;
}
private TimeValue randomExpirationDelay() {
return randomBoolean() ? randomTimeValue() : TimeValue.timeValueHours(randomIntBetween(-10, 10) * 24);
}
}
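The update loop above pushes a new value for each dynamic setting through the cluster update settings API (transient or persistent, chosen at random) and then waits for every node's MarvelSettings instance to pick it up. A minimal sketch of doing the same thing for a single setting with the Java client; the literal key "marvel.agent.interval" is assumed here, the authoritative names being the MarvelSettings constants used throughout this diff:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

// Sketch only: update one Marvel dynamic setting at runtime, mirroring what
// prepareRandomUpdateSettings() does in the test above.
public class UpdateMarvelSettingSketch {

    static void setCollectionInterval(Client client, String interval) {
        Settings update = Settings.builder()
                .put("marvel.agent.interval", interval) // assumed key for MarvelSettings.INTERVAL
                .build();
        client.admin().cluster().prepareUpdateSettings()
                .setTransientSettings(update)           // or setPersistentSettings(update)
                .get();
    }
}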

View File

@ -18,17 +18,20 @@ import org.elasticsearch.license.plugin.core.LicensesClientService;
import org.elasticsearch.license.plugin.core.LicensesService;
import org.elasticsearch.marvel.MarvelPlugin;
import org.elasticsearch.marvel.mode.Mode;
import org.elasticsearch.plugins.AbstractPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
@ClusterScope(scope = SUITE, transportClientRatio = 0, numClientNodes = 0)
public class LicenseIntegrationTests extends ESIntegTestCase {
@ -43,22 +46,27 @@ public class LicenseIntegrationTests extends ESIntegTestCase {
@Test
public void testEnableDisableLicense() {
assertMarvelMode(Mode.STANDARD);
assertThat(getLicenseService().mode(), equalTo(Mode.STANDARD));
assertThat(getLicenseService().enabled(), is(true));
assertThat(getLicenseService().expiryDate(), greaterThan(0L));
disableLicensing();
assertMarvelMode(Mode.LITE);
assertThat(getLicenseService().mode(), equalTo(Mode.LITE));
assertThat(getLicenseService().enabled(), is(false));
assertThat(getLicenseService().expiryDate(), greaterThan(0L));
enableLicensing();
assertMarvelMode(Mode.STANDARD);
assertThat(getLicenseService().mode(), equalTo(Mode.STANDARD));
assertThat(getLicenseService().enabled(), is(true));
assertThat(getLicenseService().expiryDate(), greaterThan(0L));
}
private void assertMarvelMode(Mode expected) {
private LicenseService getLicenseService() {
LicenseService licenseService = internalCluster().getInstance(LicenseService.class);
assertNotNull(licenseService);
assertThat(licenseService.mode(), equalTo(expected));
return licenseService;
}
public static void disableLicensing() {
for (MockLicenseService service : internalCluster().getInstances(MockLicenseService.class)) {
service.disable();
@ -71,7 +79,7 @@ public class LicenseIntegrationTests extends ESIntegTestCase {
}
}
public static class MockLicensePlugin extends AbstractPlugin {
public static class MockLicensePlugin extends Plugin {
public static final String NAME = "internal-test-licensing";
@ -86,8 +94,8 @@ public class LicenseIntegrationTests extends ESIntegTestCase {
}
@Override
public Collection<Class<? extends Module>> modules() {
return ImmutableSet.<Class<? extends Module>>of(InternalLicenseModule.class);
public Collection<Module> nodeModules() {
return Collections.<Module>singletonList(new InternalLicenseModule());
}
}
@ -123,7 +131,7 @@ public class LicenseIntegrationTests extends ESIntegTestCase {
}
@Override
public void register(String s, LicensesService.TrialLicenseOptions trialLicenseOptions, Collection<LicensesService.ExpirationCallback> collection, Listener listener) {
public void register(String s, LicensesService.TrialLicenseOptions trialLicenseOptions, Collection<LicensesService.ExpirationCallback> collection, AcknowledgementCallback acknowledgementCallback, Listener listener) {
listeners.add(listener);
enable();
}

View File

@ -7,5 +7,5 @@ log4j.additivity.org.apache.http=false
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
log4j.logger.org.elasticsearch.marvel.agent=TRACE, out
log4j.additivity.org.elasticsearch.marvel.agent=false
log4j.logger.org.elasticsearch.marvel=TRACE

View File

@ -1,5 +1,5 @@
{
"cluster_name": "test",
"cluster_uuid": "dsFPzYRyQCe6cq48a0wxkQ",
"timestamp": "2015-07-22T15:54:02.979Z",
"cluster_state": {
"status": "yellow",

View File

@ -1,5 +1,5 @@
{
"cluster_name": "test",
"cluster_uuid": "dsFPzYRyQCe6cq48a0wxkQ",
"timestamp": "2015-07-22T15:54:02.979Z",
"cluster_stats": {
"indices": {
@ -23,7 +23,7 @@
"total": 1
},
"versions": [
"2.0.0-beta1-SNAPSHOT"
"2.0.0-SNAPSHOT"
],
"jvm": {
"max_uptime_in_millis": 75191

View File

@ -1,5 +1,5 @@
{
"cluster_name": "test",
"cluster_uuid": "dsFPzYRyQCe6cq48a0wxkQ",
"timestamp": "2015-07-22T15:54:02.979Z",
"index_recovery": {
"shards": [

View File

@ -1,5 +1,5 @@
{
"cluster_name": "test",
"cluster_uuid": "dsFPzYRyQCe6cq48a0wxkQ",
"timestamp": "2015-07-22T15:54:02.979Z",
"index_stats": {
"index": "index-0",
@ -12,8 +12,28 @@
"throttle_time_in_millis": 0
},
"indexing": {
"index_total" : 3,
"index_time_in_millis" : 71,
"throttle_time_in_millis": 302
},
"search" : {
"query_total" : 1,
"query_time_in_millis" : 7
},
"merges" : {
"total_size_in_bytes" : 42
},
"refresh" : {
"total_time_in_millis" : 978
},
"segments" : {
"memory_in_bytes" : 87965412
}
},
"primaries" : {
"docs" : {
"count" : 345678
}
}
}
}
}

View File

@ -1,5 +1,5 @@
{
"cluster_name": "test",
"cluster_uuid": "dsFPzYRyQCe6cq48a0wxkQ",
"timestamp": "2015-07-22T15:54:02.979Z",
"node_stats": {
"node_id": "OB5Mr2NGQe6xYCFvMggReg",
@ -22,6 +22,9 @@
"count": 0
}
},
"os": {
"load_average" : 0.66
},
"process": {
"open_file_descriptors": 235,
"max_file_descriptors": 4096
@ -48,10 +51,10 @@
"index": {
"rejected": 0
},
"bulk": {
"search": {
"rejected": 0
},
"search": {
"bulk": {
"rejected": 0
}
},

pom.xml
View File

@ -6,15 +6,16 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>x-plugins</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Elasticsearch X-Plugins - Parent POM</name>
<name>X-Plugins: Parent POM</name>
<parent>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-plugin</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<artifactId>plugins</artifactId>
<version>2.1.0-SNAPSHOT</version>
</parent>
<properties>
@ -22,6 +23,7 @@
<elasticsearch.license.headerDefinition>${elasticsearch.tools.directory}/commercial-license-check/license_header_definition.xml</elasticsearch.license.headerDefinition>
<assembly.attach>true</assembly.attach>
<sources.attach>true</sources.attach>
<deploy.skip>false</deploy.skip>
</properties>
<scm>
@ -85,7 +87,7 @@
<configuration>
<!-- include x-dev-tools resources bundle -->
<resourceBundles combine.children="append">
<resourceBundle>org.elasticsearch:elasticsearch-x-dev-tools:${elasticsearch.version}</resourceBundle>
<resourceBundle>org.elasticsearch:x-dev-tools:${elasticsearch.version}</resourceBundle>
</resourceBundles>
</configuration>
</plugin>
@ -122,6 +124,13 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<configuration>
<skip>${deploy.skip}</skip>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
@ -155,6 +164,20 @@
<url>http://maven.elasticsearch.org/artifactory/public-releases</url>
</repository>
</distributionManagement>
<pluginRepositories>
<pluginRepository>
<id>oss-snapshots</id>
<name>Sonatype OSS Snapshots</name>
<url>https://oss.sonatype.org/content/repositories/snapshots/</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
<updatePolicy>always</updatePolicy>
</snapshots>
</pluginRepository>
</pluginRepositories>
<build>
<plugins>
<plugin>

View File

@ -7,7 +7,6 @@
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<packaging>pom</packaging>
<name>QA: Parent POM</name>
<inceptionYear>2015</inceptionYear>
@ -15,7 +14,7 @@
<parent>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>x-plugins</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
</parent>
<properties>
@ -174,11 +173,6 @@
<artifactId>t-digest</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
@ -321,6 +315,17 @@
<module>smoke-test-plugins</module>
<!-- Disabled until 'openssl' is available on the Windows build machines
<module>smoke-test-plugins-ssl</module>-->
<module>shield-core-rest-tests</module>
<module>smoke-test-watcher-with-shield</module>
<module>shield-example-realm</module>
</modules>
<profiles>
<profile>
<id>deploy-public</id>
<properties>
<deploy.skip>true</deploy.skip>
</properties>
</profile>
</profiles>
</project>

View File

@ -0,0 +1,8 @@
<?xml version="1.0"?>
<project name="smoke-test-x-plugins"
xmlns:ac="antlib:net.sf.antcontrib">
<import file="${elasticsearch.integ.antfile.default}"/>
<import file="${elasticsearch.tools.directory}/ant/shield-overrides.xml"/>
</project>

View File

@ -0,0 +1,149 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>2.1.0-SNAPSHOT</version>
</parent>
<!--
This runs core REST tests with shield enabled.
-->
<artifactId>shield-core-rest-tests</artifactId>
<name>QA: Shield core REST tests</name>
<description>Run core REST tests with Shield enabled</description>
<properties>
<skip.unit.tests>true</skip.unit.tests>
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.load_packaged>true</tests.rest.load_packaged>
<tests.rest.blacklist>indices.get/10_basic/*allow_no_indices*,cat.count/10_basic/Test cat count output,cat.aliases/10_basic/Empty cluster,indices.segments/10_basic/no segments test,indices.clear_cache/10_basic/clear_cache test,indices.status/10_basic/Indices status test,cat.indices/10_basic/Test cat indices output,cat.recovery/10_basic/Test cat recovery output,cat.shards/10_basic/Test cat shards output,termvector/20_issue7121/*,index/10_with_id/Index with ID,indices.get_alias/20_emtpy/*,cat.segments/10_basic/Test cat segments output,indices.put_settings/10_basic/Test indices settings allow_no_indices,indices.put_settings/10_basic/Test indices settings ignore_unavailable,indices.refresh/10_basic/Indices refresh test no-match wildcard,indices.stats/10_index/Index - star*,indices.recovery/10_basic/Indices recovery test*,template/30_render_search_template/*,indices.shard_stores/10_basic/no indices test,cat.nodeattrs/10_basic/Test cat nodes attrs output</tests.rest.blacklist>
<xplugins.list>license,shield</xplugins.list>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>integ-setup-dependencies</id>
<phase>pre-integration-test</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<useBaseVersion>true</useBaseVersion>
<outputDirectory>${integ.deps}/plugins</outputDirectory>
<artifactItems>
<!-- elasticsearch distribution -->
<artifactItem>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<outputDirectory>${integ.deps}</outputDirectory>
</artifactItem>
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
<!-- integration tests -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<!-- start up external cluster -->
<execution>
<id>integ-setup</id>
<phase>pre-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<ant antfile="${elasticsearch.integ.antfile}" target="start-external-cluster-with-plugins">
<property name="tests.jvm.argline" value="${tests.jvm.argline}"/>
<property name="plugins.dir" value="${plugins.dir}"/>
<property name="xplugins.list" value="${xplugins.list}"/>
</ant>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
<!-- shut down external cluster -->
<execution>
<id>integ-teardown</id>
<phase>post-integration-test</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<ant antfile="${elasticsearch.integ.antfile}" target="stop-external-cluster"/>
</target>
<skip>${skip.integ.tests}</skip>
</configuration>
</execution>
</executions>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,54 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.shield;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.shield.ShieldPlugin;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;
import java.io.IOException;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
public class RestIT extends ESRestTestCase {
private static final String USER = "test_user";
private static final String PASS = "changeme";
public RestIT(@Name("yaml") RestTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
return ESRestTestCase.createParameters(0, 1);
}
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue(USER, new SecuredString(PASS.toCharArray()));
return Settings.builder()
.put(Headers.PREFIX + ".Authorization", token)
.build();
}
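// settings for the transport client that talks to the external test cluster: load the Shield plugin and pass the same test credentials via shield.user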
@Override
protected Settings externalClusterClientSettings() {
return Settings.builder()
.put("shield.user", USER + ":" + PASS)
.put("plugin.types", ShieldPlugin.class.getName())
.build();
}
}

View File

@ -0,0 +1,92 @@
<?xml version="1.0"?>
<!--
~ ELASTICSEARCH CONFIDENTIAL
~ __________________
~
~ [2014] Elasticsearch Incorporated. All Rights Reserved.
~
~ NOTICE: All information contained herein is, and remains
~ the property of Elasticsearch Incorporated and its suppliers,
~ if any. The intellectual and technical concepts contained
~ herein are proprietary to Elasticsearch Incorporated
~ and its suppliers and may be covered by U.S. and Foreign Patents,
~ patents in process, and are protected by trade secret or copyright law.
~ Dissemination of this information or reproduction of this material
~ is strictly forbidden unless prior written permission is obtained
~ from Elasticsearch Incorporated.
-->
<project name="shield-example-realm"
xmlns:ac="antlib:net.sf.antcontrib">
<import file="${elasticsearch.integ.antfile.default}"/>
<!-- redefined to work with auth -->
<macrodef name="waitfor-elasticsearch">
<attribute name="port"/>
<attribute name="timeoutproperty"/>
<sequential>
<echo>Waiting for elasticsearch to become available on port @{port}...</echo>
<waitfor maxwait="30" maxwaitunit="second"
checkevery="500" checkeveryunit="millisecond"
timeoutproperty="@{timeoutproperty}">
<socket server="127.0.0.1" port="@{port}"/>
</waitfor>
</sequential>
</macrodef>
<target name="start-external-cluster-with-plugin" depends="setup-workspace">
<ac:for list="${xplugins.list}" param="xplugin.name">
<sequential>
<fail message="Expected @{xplugin.name}-${version}.zip as a dependency, but could not be found in ${integ.deps}/plugins}">
<condition>
<not>
<available file="${integ.deps}/plugins/@{xplugin.name}-${elasticsearch.version}.zip" />
</not>
</condition>
</fail>
</sequential>
</ac:for>
<ac:for param="file">
<path>
<fileset dir="${integ.deps}/plugins"/>
</path>
<sequential>
<local name="plugin.name"/>
<convert-plugin-name file="@{file}" outputproperty="plugin.name"/>
<install-plugin name="${plugin.name}" file="@{file}"/>
</sequential>
</ac:for>
<local name="home"/>
<property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
<echo>Adding shield users...</echo>
<run-script script="${home}/bin/shield/esusers">
<nested>
<arg value="useradd"/>
<arg value="test_user"/>
<arg value="-p"/>
<arg value="changeme"/>
<arg value="-r"/>
<arg value="user"/>
</nested>
</run-script>
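<!-- start the node with two authc realms: the example custom realm first (order 0), then the esusers realm (order 1) so the test_user added above can still authenticate -->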
<startup-elasticsearch>
<additional-args>
<arg value="-Des.shield.authc.realms.custom.order=0"/>
<arg value="-Des.shield.authc.realms.custom.type=custom"/>
<arg value="-Des.shield.authc.realms.esusers.order=1"/>
<arg value="-Des.shield.authc.realms.esusers.type=esusers"/>
</additional-args>
</startup-elasticsearch>
<echo>Checking we can connect with basic auth on port ${integ.http.port}...</echo>
<local name="temp.file"/>
<tempfile property="temp.file" destdir="${java.io.tmpdir}"/>
<get src="http://127.0.0.1:${integ.http.port}" dest="${temp.file}"
username="test_user" password="changeme" verbose="true" retries="10"/>
</target>
</project>

View File

@ -0,0 +1,123 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>x-plugins-qa</artifactId>
<groupId>org.elasticsearch.qa</groupId>
<version>2.1.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>shield-example-realm</artifactId>
<name>QA: Shield Example Realm</name>
<description>A basic example custom realm with tests to ensure the functionality works in Shield</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.example.ExampleRealmPlugin</elasticsearch.plugin.classname>
<elasticsearch.plugin.isolated>false</elasticsearch.plugin.isolated>
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<xplugins.list>license,shield,shield-example-realm</xplugins.list>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>integ-setup-dependencies</id>
<phase>pre-integration-test</phase>
<goals>
<goal>copy</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<useBaseVersion>true</useBaseVersion>
<outputDirectory>${integ.deps}/plugins</outputDirectory>
<artifactItems>
<!-- elasticsearch distribution -->
<artifactItem>
<groupId>org.elasticsearch.distribution.zip</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
<outputDirectory>${integ.deps}</outputDirectory>
</artifactItem>
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
<artifactItem>
<groupId>${project.groupId}</groupId>
<artifactId>${project.artifactId}</artifactId>
<version>${project.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
</artifactItem>
</artifactItems>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,30 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example;
import org.elasticsearch.example.realm.CustomAuthenticationFailureHandler;
import org.elasticsearch.example.realm.CustomRealm;
import org.elasticsearch.example.realm.CustomRealmFactory;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.shield.authc.AuthenticationModule;
public class ExampleRealmPlugin extends Plugin {
@Override
public String name() {
return "custom realm example";
}
@Override
public String description() {
return "a very basic implementation of a custom realm to validate it works";
}
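// invoked by the plugin framework with Shield's AuthenticationModule: registers the factory for the "custom" realm type and installs the failure handler that adds the WWW-Authenticate challenge header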
public void onModule(AuthenticationModule authenticationModule) {
authenticationModule.addCustomRealm(CustomRealm.TYPE, CustomRealmFactory.class);
authenticationModule.setAuthenticationFailureHandler(CustomAuthenticationFailureHandler.class);
}
}

View File

@ -0,0 +1,47 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example.realm;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.shield.authc.AuthenticationToken;
import org.elasticsearch.shield.authc.DefaultAuthenticationFailureHandler;
import org.elasticsearch.transport.TransportMessage;
public class CustomAuthenticationFailureHandler extends DefaultAuthenticationFailureHandler {
@Override
public ElasticsearchSecurityException unsuccessfulAuthentication(RestRequest request, AuthenticationToken token) {
ElasticsearchSecurityException e = super.unsuccessfulAuthentication(request, token);
// set a custom header
e.addHeader("WWW-Authenticate", "custom-challenge");
return e;
}
@Override
public ElasticsearchSecurityException unsuccessfulAuthentication(TransportMessage message, AuthenticationToken token, String action) {
ElasticsearchSecurityException e = super.unsuccessfulAuthentication(message, token, action);
// set a custom header
e.addHeader("WWW-Authenticate", "custom-challenge");
return e;
}
@Override
public ElasticsearchSecurityException missingToken(RestRequest request) {
ElasticsearchSecurityException e = super.missingToken(request);
// set a custom header
e.addHeader("WWW-Authenticate", "custom-challenge");
return e;
}
@Override
public ElasticsearchSecurityException missingToken(TransportMessage message, String action) {
ElasticsearchSecurityException e = super.missingToken(message, action);
// set a custom header
e.addHeader("WWW-Authenticate", "custom-challenge");
return e;
}
}

View File

@ -0,0 +1,69 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example.realm;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.shield.User;
import org.elasticsearch.shield.authc.AuthenticationToken;
import org.elasticsearch.shield.authc.Realm;
import org.elasticsearch.shield.authc.RealmConfig;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.shield.authc.support.UsernamePasswordToken;
import org.elasticsearch.transport.TransportMessage;
public class CustomRealm extends Realm<UsernamePasswordToken> {
public static final String TYPE = "custom";
static final String USER_HEADER = "User";
static final String PW_HEADER = "Password";
static final String KNOWN_USER = "custom_user";
static final String KNOWN_PW = "changeme";
static final String[] ROLES = new String[] { "admin" };
public CustomRealm(RealmConfig config) {
super(TYPE, config);
}
@Override
public boolean supports(AuthenticationToken token) {
return token instanceof UsernamePasswordToken;
}
@Override
public UsernamePasswordToken token(RestRequest request) {
String user = request.header(USER_HEADER);
if (user != null) {
String password = request.header(PW_HEADER);
if (password != null) {
return new UsernamePasswordToken(user, new SecuredString(password.toCharArray()));
}
}
return null;
}
@Override
public UsernamePasswordToken token(TransportMessage<?> message) {
String user = message.getHeader(USER_HEADER);
if (user != null) {
String password = message.getHeader(PW_HEADER);
if (password != null) {
return new UsernamePasswordToken(user, new SecuredString(password.toCharArray()));
}
}
return null;
}
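// only the single known user/password pair is accepted; the password check uses SecuredString's constant-time comparison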
@Override
public User authenticate(UsernamePasswordToken token) {
final String actualUser = token.principal();
if (KNOWN_USER.equals(actualUser) && SecuredString.constantTimeEquals(token.credentials(), KNOWN_PW)) {
return new User.Simple(actualUser, ROLES);
}
return null;
}
}

View File

@ -0,0 +1,26 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example.realm;
import org.elasticsearch.shield.authc.Realm;
import org.elasticsearch.shield.authc.RealmConfig;
public class CustomRealmFactory extends Realm.Factory<CustomRealm> {
public CustomRealmFactory() {
super(CustomRealm.TYPE, false);
}
@Override
public CustomRealm create(RealmConfig config) {
return new CustomRealm(config);
}
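// returning null from createDefault presumably means the realm is never created implicitly and must be configured explicitly (see the -Des.shield.authc.realms.custom.* settings in integration-tests.xml)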
@Override
public CustomRealm createDefault(String name) {
return null;
}
}

View File

@ -0,0 +1,99 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example.realm;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.shield.ShieldPlugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.rest.client.http.HttpResponse;
import org.junit.Test;
import static org.hamcrest.Matchers.*;
/**
* Integration test that exercises authentication against the custom realm
*/
public class CustomRealmIT extends ESIntegTestCase {
@Override
protected Settings externalClusterClientSettings() {
return Settings.builder()
.put("plugin.types", ShieldPlugin.class.getName())
.put(Headers.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
.put(Headers.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW)
.build();
}
@Test
public void testHttpConnectionWithNoAuthentication() throws Exception {
HttpResponse response = httpClient().path("/").execute();
assertThat(response.getStatusCode(), is(401));
String value = response.getHeaders().get("WWW-Authenticate");
assertThat(value, is("custom-challenge"));
}
@Test
public void testHttpAuthentication() throws Exception {
HttpResponse response = httpClient().path("/")
.addHeader(CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
.addHeader(CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW)
.execute();
assertThat(response.getStatusCode(), is(200));
}
@Test
public void testTransportClient() throws Exception {
NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
NodeInfo[] nodes = nodeInfos.getNodes();
assertTrue(nodes.length > 0);
TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress();
String clusterName = nodeInfos.getClusterNameAsString();
Settings settings = Settings.builder()
.put("path.home", createTempDir())
.put("plugin.types", ShieldPlugin.class.getName())
.put("cluster.name", clusterName)
.put(Headers.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER)
.put(Headers.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW)
.build();
try (TransportClient client = TransportClient.builder().settings(settings).build()) {
client.addTransportAddress(publishAddress);
ClusterHealthResponse response = client.admin().cluster().prepareHealth().execute().actionGet();
assertThat(response.isTimedOut(), is(false));
}
}
@Test
public void testTransportClientWrongAuthentication() throws Exception {
NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
NodeInfo[] nodes = nodeInfos.getNodes();
assertTrue(nodes.length > 0);
TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress();
String clusterName = nodeInfos.getClusterNameAsString();
Settings settings = Settings.builder()
.put("path.home", createTempDir())
.put("plugin.types", ShieldPlugin.class.getName())
.put("cluster.name", clusterName)
.put(Headers.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER + randomAsciiOfLength(1))
.put(Headers.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW)
.build();
try (TransportClient client = TransportClient.builder().settings(settings).build()) {
client.addTransportAddress(publishAddress);
client.admin().cluster().prepareHealth().execute().actionGet();
fail("authentication failure should have resulted in a NoNodesAvailableException");
} catch (NoNodeAvailableException e) {
// expected
}
}
}

View File

@ -0,0 +1,39 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.example.realm;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.shield.User;
import org.elasticsearch.shield.authc.RealmConfig;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.shield.authc.support.UsernamePasswordToken;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import static org.hamcrest.Matchers.*;
public class CustomRealmTests extends ESTestCase {
@Test
public void testAuthenticate() {
Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build();
CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings));
UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER, new SecuredString(CustomRealm.KNOWN_PW.toCharArray()));
User user = realm.authenticate(token);
assertThat(user, notNullValue());
assertThat(user.roles(), equalTo(CustomRealm.ROLES));
assertThat(user.principal(), equalTo(CustomRealm.KNOWN_USER));
}
@Test
public void testAuthenticateBadUser() {
Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build();
CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings));
UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER + "1", new SecuredString(CustomRealm.KNOWN_PW.toCharArray()));
User user = realm.authenticate(token);
assertThat(user, nullValue());
}
}

View File

@ -8,7 +8,7 @@
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
</parent>
<!--
@ -25,7 +25,7 @@
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.suite>smoke_test_plugins_ssl</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
<xplugins.list>elasticsearch-license,elasticsearch-marvel,elasticsearch-shield,elasticsearch-watcher</xplugins.list>
<xplugins.list>license,marvel,shield,watcher</xplugins.list>
<ssl.antfile>${project.basedir}/ssl-setup.xml</ssl.antfile>
<keystore.path>${project.build.outputDirectory}/test-node.jks</keystore.path>
</properties>
@ -33,8 +33,8 @@
<dependencies>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
@ -70,7 +70,7 @@
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -78,7 +78,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-watcher</artifactId>
<artifactId>watcher</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -86,7 +86,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -94,7 +94,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-marvel</artifactId>
<artifactId>marvel</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -103,7 +103,7 @@
<!-- open-source plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-kuromoji</artifactId>
<artifactId>analysis-kuromoji</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -111,7 +111,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-smartcn</artifactId>
<artifactId>analysis-smartcn</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -119,7 +119,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-stempel</artifactId>
<artifactId>analysis-stempel</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -127,7 +127,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-phonetic</artifactId>
<artifactId>analysis-phonetic</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -135,7 +135,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-icu</artifactId>
<artifactId>analysis-icu</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -143,7 +143,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-gce</artifactId>
<artifactId>cloud-gce</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -151,7 +151,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-azure</artifactId>
<artifactId>cloud-azure</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -159,7 +159,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-aws</artifactId>
<artifactId>cloud-aws</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -167,7 +167,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-delete-by-query</artifactId>
<artifactId>delete-by-query</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -175,7 +175,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-lang-python</artifactId>
<artifactId>lang-python</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -183,7 +183,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-lang-javascript</artifactId>
<artifactId>lang-javascript</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -191,7 +191,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-mapper-size</artifactId>
<artifactId>mapper-size</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -199,7 +199,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-site-example</artifactId>
<artifactId>site-example</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>

View File

@ -3,80 +3,6 @@
xmlns:ac="antlib:net.sf.antcontrib">
<import file="${elasticsearch.integ.antfile.default}"/>
<import file="${elasticsearch.tools.directory}/ant/shield-overrides.xml"/>
<!-- redefined to work with auth -->
<macrodef name="waitfor-elasticsearch">
<attribute name="port"/>
<attribute name="timeoutproperty"/>
<sequential>
<echo>Waiting for elasticsearch to become available on port @{port}...</echo>
<waitfor maxwait="30" maxwaitunit="second"
checkevery="500" checkeveryunit="millisecond"
timeoutproperty="@{timeoutproperty}">
<socket server="127.0.0.1" port="@{port}"/>
</waitfor>
</sequential>
</macrodef>
<target name="start-external-cluster-with-plugins" depends="setup-workspace">
<ac:for list="${xplugins.list}" param="xplugin.name">
<sequential>
<fail message="Expected @{xplugin.name}-${version}.zip as a dependency, but could not be found in ${integ.deps}/plugins}">
<condition>
<not>
<available file="${integ.deps}/plugins/@{xplugin.name}-${elasticsearch.version}.zip" />
</not>
</condition>
</fail>
</sequential>
</ac:for>
<ac:for param="file">
<path>
<fileset dir="${integ.deps}/plugins"/>
</path>
<sequential>
<local name="plugin.name"/>
<convert-plugin-name file="@{file}" outputproperty="plugin.name"/>
<install-plugin name="${plugin.name}" file="@{file}"/>
</sequential>
</ac:for>
<local name="home"/>
<property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
<echo>Setting up Shield auth</echo>
<run-script script="${home}/bin/shield/esusers">
<nested>
<arg value="useradd"/>
<arg value="test_user"/>
<arg value="-p"/>
<arg value="changeme"/>
<arg value="-r"/>
<arg value="admin"/>
</nested>
</run-script>
<run-script script="${home}/bin/shield/esusers">
<nested>
<arg value="useradd"/>
<arg value="marvel_export"/>
<arg value="-p"/>
<arg value="changeme"/>
<arg value="-r"/>
<arg value="marvel_agent"/>
</nested>
</run-script>
<startup-elasticsearch>
<additional-args>
<arg value="-Des.marvel.agent.exporter.es.hosts=http://marvel_export:changeme@localhost:${integ.http.port}"/>
</additional-args>
</startup-elasticsearch>
<echo>Checking we can connect with basic auth on port ${integ.http.port}...</echo>
<local name="temp.file"/>
<tempfile property="temp.file" destdir="${java.io.tmpdir}"/>
<get src="http://127.0.0.1:${integ.http.port}" dest="${temp.file}"
username="test_user" password="changeme" verbose="true" retries="10"/>
</target>
</project>

View File

@ -8,7 +8,7 @@
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
</parent>
<!--
@ -25,14 +25,14 @@
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.suite>smoke_test_plugins</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
<xplugins.list>elasticsearch-license,elasticsearch-marvel,elasticsearch-shield,elasticsearch-watcher</xplugins.list>
<xplugins.list>license,marvel,shield,watcher</xplugins.list>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
@ -68,7 +68,7 @@
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -76,7 +76,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-watcher</artifactId>
<artifactId>watcher</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -84,7 +84,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -92,7 +92,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-marvel</artifactId>
<artifactId>marvel</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -101,7 +101,7 @@
<!-- open-source plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-kuromoji</artifactId>
<artifactId>analysis-kuromoji</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -109,7 +109,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-smartcn</artifactId>
<artifactId>analysis-smartcn</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -117,7 +117,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-stempel</artifactId>
<artifactId>analysis-stempel</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -125,7 +125,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-phonetic</artifactId>
<artifactId>analysis-phonetic</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -133,7 +133,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-analysis-icu</artifactId>
<artifactId>analysis-icu</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -141,7 +141,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-gce</artifactId>
<artifactId>cloud-gce</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -149,7 +149,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-azure</artifactId>
<artifactId>cloud-azure</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -157,7 +157,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-cloud-aws</artifactId>
<artifactId>cloud-aws</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -165,7 +165,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-delete-by-query</artifactId>
<artifactId>delete-by-query</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -173,7 +173,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-lang-python</artifactId>
<artifactId>lang-python</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -181,7 +181,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-lang-javascript</artifactId>
<artifactId>lang-javascript</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -189,7 +189,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-mapper-size</artifactId>
<artifactId>mapper-size</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -197,7 +197,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-site-example</artifactId>
<artifactId>site-example</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>

View File

@ -25,7 +25,7 @@
<parent>
<groupId>org.elasticsearch.qa</groupId>
<artifactId>x-plugins-qa</artifactId>
<version>2.0.0-beta1-SNAPSHOT</version>
<version>2.1.0-SNAPSHOT</version>
</parent>
<artifactId>smoke-test-watcher-with-shield</artifactId>
@ -36,26 +36,26 @@
<skip.unit.tests>true</skip.unit.tests>
<elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
<xplugins.list>elasticsearch-license,elasticsearch-shield,elasticsearch-watcher</xplugins.list>
<xplugins.list>license,shield,watcher</xplugins.list>
<tests.rest.blacklist>hijack/10_basic/*</tests.rest.blacklist>
</properties>
<dependencies>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>watcher</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-watcher</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
@ -126,7 +126,7 @@
<!-- commercial plugins -->
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-license</artifactId>
<artifactId>license</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -134,7 +134,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-watcher</artifactId>
<artifactId>watcher</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>
@ -142,7 +142,7 @@
<artifactItem>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<artifactId>shield</artifactId>
<version>${elasticsearch.version}</version>
<type>zip</type>
<overWrite>true</overWrite>

View File

@ -12,6 +12,7 @@ import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.plugin.LicensePlugin;
import org.elasticsearch.shield.ShieldPlugin;
@ -48,7 +49,7 @@ public class WatcherWithShieldIT extends ESRestTestCase {
public void startWatcher() throws Exception {
try(CloseableHttpClient client = HttpClients.createMinimal(new BasicHttpClientConnectionManager())) {
InetSocketAddress address = cluster().httpAddresses()[0];
HttpPut request = new HttpPut(new URI("http", null, address.getAddress().getHostAddress(), address.getPort(), "/_watcher/_start", null, null));
HttpPut request = new HttpPut(new URI("http", null, NetworkAddress.formatAddress(address.getAddress()), address.getPort(), "/_watcher/_start", null, null));
String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecuredString(TEST_ADMIN_PASSWORD.toCharArray()));
request.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, token);
client.execute(request);
@ -59,7 +60,7 @@ public class WatcherWithShieldIT extends ESRestTestCase {
public void stopWatcher() throws Exception {
try(CloseableHttpClient client = HttpClients.createMinimal(new BasicHttpClientConnectionManager())) {
InetSocketAddress address = cluster().httpAddresses()[0];
HttpPut request = new HttpPut(new URI("http", null, address.getAddress().getHostAddress(), address.getPort(), "/_watcher/_stop", null, null));
HttpPut request = new HttpPut(new URI("http", null, NetworkAddress.formatAddress(address.getAddress()), address.getPort(), "/_watcher/_stop", null, null));
String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecuredString(TEST_ADMIN_PASSWORD.toCharArray()));
request.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, token);
client.execute(request);

View File

@ -20,7 +20,7 @@
<target name="start-external-cluster-with-plugin" depends="setup-workspace">
<local name="home"/>
<property name="home" location="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
<install-plugin name="elasticsearch-license" file="${integ.deps}/elasticsearch-license-${project.version}.zip" />
<install-plugin name="license" file="${integ.deps}/license-${project.version}.zip" />
<install-plugin name="${project.artifactId}" file="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip"/>
<run-script script="${home}/bin/shield/esusers">
<nested>

View File

@ -207,7 +207,7 @@ The following tables describe the possible attributes each entry type can carry
[float]
=== Audit Logs Settings
As mentioned above, the audit logs are configured in the `logging.yml` file located in `ES_HOME/config/shield`. The following snippet shows the default logging configuration:
As mentioned above, the audit logs are configured in the `logging.yml` file located in `CONFIG_DIR/shield`. The following snippet shows the default logging configuration:
[[logging-file]]

View File

@ -15,7 +15,7 @@ To use the transport client with Shield, you need to:
`transport_client` role is defined in `roles.yml` that grants the `cluster:monitor/nodes/info` cluster permission. The transport client uses the node info API to fetch information about the nodes in the cluster. If the client is configured to use sniffing, you need to add the
`cluster:monitor/state` cluster permission to the `transport_client` role.
. Add the Shield JAR file (`elasticsearch-shield-2.0.0.jar`) to your CLASSPATH. You can download the Shield distribution and extract the JAR file manually or you can get it from the http://maven.elasticsearch.org/releases/org/elasticsearch/plugin/elasticsearch-shield/2.0.0/elasticsearch-shield-2.0.0.jar[Elasticsearch Maven repository].
. Add the Shield JAR file (`shield-2.1.0.jar`) to your CLASSPATH. You can download the Shield distribution and extract the JAR file manually or you can get it from the http://maven.elasticsearch.org/releases/org/elasticsearch/plugin/shield/{version}/shield-{version}.jar[Elasticsearch Maven repository].
+
If you are using Maven, you need to add the Shield JAR file as a dependency in your project's `pom.xml` file:
+
@ -43,8 +43,8 @@ If you are using Maven, you need to add the Shield JAR file as a dependency in y
<!-- add the shield jar as a dependency -->
<dependency>
<groupId>org.elasticsearch.plugin</groupId>
<artifactId>elasticsearch-shield</artifactId>
<version>2.0.0</version>
<artifactId>shield</artifactId>
<version>2.1.0</version>
</dependency>
...
</dependencies>
@ -69,7 +69,7 @@ repositories {
dependencies {
// Provide the Shield jar on the classpath for compilation and at runtime
// Note: Many projects can use the Shield jar as a runtime dependency
compile "org.elasticsearch.plugin:elasticsearch-shield:2.0.0"
compile "org.elasticsearch.plugin:shield:2.1.0"
/* ... */
}
@ -82,9 +82,14 @@ dependencies {
import org.elasticsearch.client.transport.TransportClient;
...
TransportClient client = new TransportClient(ImmutableSettings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
TransportClient client = TransportClient.builder()
.settings(Settings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
.put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
.put("path.home", "/path/to/client/home")
...
.build())
.addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
.addTransportAddress(new InetSocketTransportAddress("localhost", 9301));
-------------------------------------------------------------------------------------------------
@ -97,15 +102,23 @@ For example, the following snippet adds the `Authorization` header to a search r
+
[source,java]
--------------------------------------------------------------------------------------------------
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.shield.authc.support.SecuredString;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
...
TransportClient client = new TransportClient(ImmutableSettings.builder()
.put("shield.user", "transport_client_user:changeme")
...
TransportClient client = TransportClient.builder()
.settings(Settings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
.put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
.put("path.home", "/path/to/client/home")
...
.build())
.build()
.addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
.addTransportAddress(new InetSocketTransportAddress("localhost", 9301));
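// A sketch of the part of the snippet that falls outside this hunk: attaching the header to an
// individual request. The index name "my-index" is illustrative, and the 2.x putHeader(String, Object)
// request-builder API is assumed here.
String token = basicAuthHeaderValue("transport_client_user", new SecuredString("changeme".toCharArray()));
SearchResponse response = client.prepareSearch("my-index")
        .putHeader("Authorization", token)
        .get();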
@ -126,15 +139,19 @@ NOTE: Client authentication is enabled by default. For information about disabli
import org.elasticsearch.client.transport.TransportClient;
...
TransportClient client = new TransportClient(ImmutableSettings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
.put("shield.ssl.keystore.path", "/path/to/client.jks") (1)
.put("shield.ssl.keystore.password", "password")
...
TransportClient client = TransportClient.builder()
.settings(Settings.builder()
.put("cluster.name", "myClusterName")
.put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
.put("shield.user", "transport_client_user:changeme")
.put("shield.ssl.keystore.path", "/path/to/client.jks") (1)
.put("shield.ssl.keystore.password", "password")
.put("path.home", "/path/to/client/home")
...
.build());
--------------------------------------------------------------------------------------------------
+
(1) The `client.jks` keystore must contain the client's signed CA certificate and the CA certificate.
(1) The `client.jks` keystore must contain the client's signed certificate and the CA certificate.
+
.. Enable the SSL transport by setting `shield.transport.ssl` to `true` in the client configuration.
+
@ -143,12 +160,17 @@ TransportClient client = new TransportClient(ImmutableSettings.builder()
import org.elasticsearch.client.transport.TransportClient;
...
TransportClient client = new TransportClient(ImmutableSettings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
.put("shield.ssl.keystore.path", "/path/to/client.jks") (1)
.put("shield.ssl.keystore.password", "password")
.put("shield.transport.ssl", "true"))
TransportClient client = TransportClient.builder()
.settings(Settings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "transport_client_user:changeme")
.put("shield.ssl.keystore.path", "/path/to/client.jks") (1)
.put("shield.ssl.keystore.password", "password")
.put("shield.transport.ssl", "true")
.put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
.put("path.home", "/path/to/client/home")
...
.build())
.addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
.addTransportAddress(new InetSocketTransportAddress("localhost", 9301));
--------------------------------------------------------------------------------------------------
@ -166,12 +188,17 @@ If you are not using client authentication and sign the Elasticsearch node certi
import org.elasticsearch.client.transport.TransportClient;
...
TransportClient client = new TransportClient(ImmutableSettings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "test_user:changeme")
.put("shield.ssl.truststore.path", "/path/to/truststore.jks") (1)
.put("shield.ssl.truststore.password", "password")
.put("shield.transport.ssl", "true"))
TransportClient client = TransportClient.builder()
.settings(Settings.builder()
.put("cluster.name", "myClusterName")
.put("shield.user", "test_user:changeme")
.put("shield.ssl.truststore.path", "/path/to/truststore.jks") (1)
.put("shield.ssl.truststore.password", "password")
.put("shield.transport.ssl", "true")
.put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
.put("path.home", "/path/to/client/home")
...
.build())
.addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
.addTransportAddress(new InetSocketTransportAddress("localhost", 9301));
------------------------------------------------------------------------------------------------------
@ -198,7 +225,7 @@ The following example shows how you can clear Shield's realm caches using `Shiel
import static org.elasticsearch.node.NodeBuilder.*;
...
Client client = ... // create the client (either transport or node)
Client client = ... // create the transport client
ShieldClient shieldClient = new ShieldClient(client);
ClearRealmCacheResponse response = shieldClient.authc().prepareClearRealmCache()

View File

@ -25,7 +25,7 @@ As an administrator, you will need to define the roles that you want to use, the
[[defining-roles]]
=== Defining Roles
Roles are defined in the role definition file `roles.yml` located in `ES_HOME/config/shield`.
Roles are defined in the role definition file `roles.yml` located in `CONFIG_DIR/shield`.
This is a YAML file where each entry defines the unique role name and the cluster and indices permissions associated
with it.

View File

@ -2,7 +2,7 @@
== Getting Started with Shield
This getting started guide walks you through installing Shield, setting up basic authentication, and getting started with role-based
access control. You can install Shield on nodes running Elasticsearch 1.5 or later.
access control. You can install Shield on nodes running Elasticsearch {version}.
IMPORTANT: The Shield plugin must be installed on every node in the cluster and every
node must be restarted after installation. Plan for a complete cluster restart before beginning the installation process.

View File

@ -12,9 +12,7 @@ To enable message authentication:
bin/shield/syskeygen
----------------
+
This creates a system key file in `ES_HOME/config/shield/system_key`. You
can customize this file's location by changing the value of the `shield.system_key.file` setting in
`elasticsearch.yml`.
This creates a system key file in `CONFIG_DIR/shield/system_key`.
. Copy the generated system key to the rest of the nodes in the cluster.

View File

@ -3,6 +3,7 @@
= Shield Reference
:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current
:version: 2.1.0
include::introduction.asciidoc[]

View File

@ -9,11 +9,11 @@ node must be restarted after installation. Plan for a complete cluster restart b
[float]
=== Shield Installation Prerequisites
To install Shield 2.x, you need:
To install Shield {version}, you need:
* Java 7 or later
* Elasticsearch 2.x
* Elasticsearch License plugin 2.x
* Elasticsearch {version}
* Elasticsearch License plugin {version}
For information about installing the latest Oracle JDK, see http://www.oracle.com/technetwork/java/javase/downloads/index-jsp-138363.html[Java SE Downloads]. For information about installing Elasticsearch, see http://www.elastic.co/guide/en/elasticsearch/reference/current/_installation.html[Installation] in the Elasticsearch Reference.
@ -21,54 +21,51 @@ For information about installing the latest Oracle JDK, see http://www.oracle.co
[[deb-rpm-install]]
=== Installing Shield on a DEB/RPM Package Installation
If you install Elasticsearch as a package or you specify a custom configuration directory, the command line
tools require you to specify the configuration directory. On Linux systems, add the following line to your
`.profile` file:
If you use the DEB/RPM packages to install Elasticsearch, by default Elasticsearch is installed in
`/usr/share/elasticsearch` and the configuration files are stored in `/etc/elasticsearch`. (For the
complete list of default paths, see {ref}/setup-dir-layout.html#_deb_and_rpm[Directory Layout] in
the Elasticsearch Reference.)
To install the Shield and License plugins on a DEB/RPM package installation, you need to run
`bin/plugin -i` from the `/usr/share/elasticsearch` directory with superuser permissions, and
specify the location of the configuration files by setting `-Des.path.conf`. For example:
[source,shell]
----------------------------------------------------------
export ES_JAVA_OPTS="-Des.path.conf=/etc/elasticsearch"
cd /usr/share/elasticsearch
sudo bin/plugin -i elasticsearch/license/latest -Des.path.conf=/etc/elasticsearch
sudo bin/plugin -i elasticsearch/shield/latest -Des.path.conf=/etc/elasticsearch
----------------------------------------------------------
NOTE: When using `sudo` to run commands as a different user, the `ES_JAVA_OPTS` setting from your profile will not be
available in the other user's environment. You can manually pass the environment variables to the command or you can
make the environment variable available by adding the following line to the `/etc/sudoers` file:
[source,shell]
----------------------------------------------------------
Defaults env_keep += "ES_JAVA_OPTS"
----------------------------------------------------------
On Windows systems, the `setx` command can be used to specify a custom configuration directory:
[source,shell]
----------------------------------------------------------
setx ES_JAVA_OPTS "-Des.path.conf=C:\config"
----------------------------------------------------------
If your server doesn't have direct Internet access, see <<offline-install,Installing Shield on Offline Machines>> for information about downloading the Shield binaries.
NOTE: If you are using a version of Shield prior to 1.3, you also need to specify the location
of the configuration files when running `esusers` and `syskeygen`.
[float]
[[offline-install]]
=== Installing Shield on Offline Machines
Elasticsearch's `bin/plugin` script requires direct Internet access for downloading and installing the security plugin.
If your server doesn't have Internet access, you can download the required binaries from the following link:
Elasticsearch's `bin/plugin` script requires direct Internet access to download and install the
License and Shield plugins. If your server doesn't have Internet access, you can manually
download and install the plugins.
[source,sh]
----------------------------------------------------
https://download.elastic.co/elasticsearch/shield/shield-1.3.0.zip
----------------------------------------------------
To install Shield on a machine that doesn't have Internet access:
Transfer the compressed file to your server, then install the plugin with the `bin/plugin` script:
. Manually download the appropriate License and Shield binaries:
** https://download.elastic.co/elasticsearch/license/license-latest.zip[
`https://download.elastic.co/elasticsearch/license/license-latest.zip`]
** https://download.elastic.co/elasticsearch/shield/shield-latest.zip[
`https://download.elastic.co/elasticsearch/shield/shield-latest.zip`]
. Transfer the zip files to the offline machine.
. Run `bin/plugin` with the `-u` option to install the plugins using the zip files. For example:
+
[source,shell]
----------------------------------------------------
bin/plugin install shield -u file://PATH_TO_ZIP_FILE <1>
----------------------------------------------------
<1> Absolute path to Shield plugin zip distribution file (e.g. `file:///path/to/file/shield-1.3.0.zip`,
note the three slashes at the beginning)
----------------------------------------------------------
bin/plugin -i license -u file:///path/to/file/license-2.1.0.zip <1>
bin/plugin -i shield -u file:///path/to/file/shield-2.1.0.zip
----------------------------------------------------------
<1> Note that you must specify an absolute path to the zip file after the `file://` protocol.
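If the offline machine cannot reach those URLs at all, you can fetch the archives on any machine that does have Internet access and transfer them. A hedged sketch with `curl` (the URLs are the ones listed in step 1; any HTTP client works):

[source,shell]
----------------------------------------------------
# Download the plugin archives, then copy them over to the offline machine.
curl -O https://download.elastic.co/elasticsearch/license/license-latest.zip
curl -O https://download.elastic.co/elasticsearch/shield/shield-latest.zip
----------------------------------------------------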
[float]
[[tribe-node]]
@ -137,8 +134,7 @@ The above command needs to be executed on each cluster, since the same user need
as on every connected cluster.
The following is the configuration required on the Tribe Node, which needs to be added to `elasticsearch.yml`.
Elasticsearch allows to list specific settings per cluster. We disable multicast discovery as described in the
<<disable-multicast, Disable Multicast section>> and configure the proper unicast discovery hosts for each cluster,
Elasticsearch allows you to list specific settings per cluster. We disable multicast discovery and configure the proper unicast discovery hosts for each cluster,
as well as their cluster names:
[source,yaml]

View File

@ -140,7 +140,7 @@ userdel <username>
[float]
=== About `esusers`
The `esusers` tool manipulates two files, `users` and `users_roles`, in `ES_HOME/config/shield/`. These two files store all user data for the _esusers_ realm and are read by Shield
The `esusers` tool manipulates two files, `users` and `users_roles`, in `CONFIG_DIR/shield/`. These two files store all user data for the _esusers_ realm and are read by Shield
on startup.
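For example, a hedged sketch of adding a user with `esusers` (the username, password, and role here are made up; the path assumes the plugin's default `bin/shield` location):

[source,shell]
----------------------------------------------------------
# Adds user "jdoe" with the "admin" role to the users and users_roles files.
bin/shield/esusers useradd jdoe -p changeme -r admin
----------------------------------------------------------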
By default, Shield checks these files for changes every 5 seconds. You can change this default behavior by changing the

View File

@ -163,7 +163,7 @@ The parameters listed in this section are configured in the `config/elasticsearc
[options="header"]
|======
| Name | Default | Description
| `shield.system_key.file` |`ES_HOME/config/shield/system_key` | Sets the location of the `system_key` file. For more information, see <<enable-message-authentication,Enabling Message Authentication>>.
| `shield.system_key.file` |`CONFIG_DIR/shield/system_key` | Sets the <<ref-shield-files-location,location>> of the `system_key` file. For more information, see <<enable-message-authentication,Enabling Message Authentication>>.
|======
[[ref-anonymous-access]]
@ -221,8 +221,8 @@ shield.authc.realms:
[options="header"]
|======
| Name | Required | Default | Description
| `files.users` | no | `ES_HOME/config/shield/users` | The location of the <<users-file, users>> file.
| `files.users_roles` | no | `ES_HOME/config/shield/users_roles`| The location of the <<users_defining-roles, users_roles>> file.
| `files.users` | no | `CONFIG_DIR/shield/users` | The <<ref-shield-files-location,location>> of the <<users-file, users>> file.
| `files.users_roles` | no | `CONFIG_DIR/shield/users_roles`| The <<ref-shield-files-location,location>> of the <<users_defining-roles, users_roles>> file.
| `cache.ttl` | no | `20m` | The time-to-live for cached user entries--user credentials are cached for this configured period of time. Defaults to `20m`. Specify values using the standard Elasticsearch {ref}/common-options.html#time-units[time units].
| `cache.max_users` | no | 100000 | The maximum number of user entries that can live in the cache at a given time. Defaults to 100,000.
| `cache.hash_algo` | no | `ssha256` | (Expert Setting) The hashing algorithm that is used for the in-memory cached user credentials. See the <<ref-cache-hash-algo,Cache hash algorithms>> table for all possible values.
@ -251,7 +251,7 @@ shield.authc.realms:
| `group_search.filter` | no | See description | When not set, the realm will search for `group`, `groupOfNames`, or `groupOfUniqueNames`, with the attributes `member` or `memberOf`. Any instance of `{0}` in the filter will be replaced by the user attribute defined in `group_search.user_attribute`
| `group_search.user_attribute` | no | Empty | Specifies the user attribute that will be fetched and provided as a parameter to the filter. If not set, the user DN is passed into the filter.
| `unmapped_groups_as_roles` | no | false | Takes a boolean variable. When this element is set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. The default value is `false`.
| `files.role_mapping` | no | `ES_HOME/config/shield/users/role_mapping.yml` | The path and file name for the <<ldap-role-mapping, YAML role mapping configuration file>>.
| `files.role_mapping` | no | `CONFIG_DIR/shield/users/role_mapping.yml` | The <<ref-shield-files-location,location>> for the <<ldap-role-mapping, YAML role mapping configuration file>>.
| `follow_referrals` | no | `true` | Boolean value that specifies whether Shield should follow referrals returned by the LDAP server. Referrals are URLs returned by the server that are to be used to continue the LDAP operation (e.g. search).
| `connect_timeout` | no | "5s" - for 5 seconds | The timeout period for establishing an LDAP connection. An `s` at the end indicates seconds, or `ms` indicates milliseconds.
| `read_timeout` | no | "5s" - for 5 seconds | The timeout period for an LDAP operation. An `s` at the end indicates seconds, or `ms` indicates milliseconds.
@ -271,7 +271,7 @@ NOTE: `user_dn_templates` is required to operate in user template mode and `user
| `url` | no | `ldap://<domain_name>:389` | A URL in the format `ldap[s]://<server>:<port>`. If not specified, the URL will be derived from the `domain_name`, assuming clear-text `ldap` and port `389` (e.g. `ldap://<domain_name>:389`).
| `domain_name` | yes | - | The domain name of Active Directory. The cluster can derive the URL and `user_search_dn` fields from values in this element if those fields are not otherwise specified.
| `unmapped_groups_as_roles` | no | false | Takes a boolean variable. When this element is set to `true`, the names of any unmapped groups and the user's relative distinguished name are used as role names and assigned to the user. The default value is `false`.
| `files.role_mapping` | no | `ES_HOME/config/shield/users/role_mapping.yml` | The path and file name for the <<ad-role-mapping, YAML role mapping configuration file>>.
| `files.role_mapping` | no | `CONFIG_DIR/shield/users/role_mapping.yml` | The <<ref-shield-files-location,location>> for the <<ad-role-mapping, YAML role mapping configuration file>>.
| `user_search.base_dn` | no | Root of Active Directory | The context to search for a user. The default value for this element is the root of the Active Directory domain.
| `user_search.scope` | no | `sub_tree` | Specifies whether the user search should be `sub_tree`, `one_level` or `base`. `one_level` only searches users directly contained within the `base_dn`. `sub_tree` searches all objects contained under `base_dn`. `base` specifies that the `base_dn` is a user object, and that it is the only user considered.
| `user_search.filter` | no | See description | Specifies a filter to use to look up a user given a username. The default filter looks up `user` objects with either `sAMAccountName` or `userPrincipalName`
@ -296,7 +296,7 @@ NOTE: `user_dn_templates` is required to operate in user template mode and `user
| `truststore.path` | no | `shield.ssl.keystore` | The path of a truststore to use. The default truststore is the one defined by <<ref-ssl-tls-settings,SSL/TLS settings>>
| `truststore.password` | no | - | The password to the truststore. Must be provided if `truststore.path` is set.
| `truststore.algorithm` | no | SunX509 | Algorithm for the truststore. Default is `SunX509`
| `files.role_mapping` | no | `ES_HOME/config/shield/users/role_mapping.yml` | Specifies the path and file name for the <<pki-role-mapping, YAML role mapping configuration file>>.
| `files.role_mapping` | no | `CONFIG_DIR/shield/users/role_mapping.yml` | Specifies the <<ref-shield-files-location,location>> for the <<pki-role-mapping, YAML role mapping configuration file>>.
|======
[[ref-cache-hash-algo]]
@ -324,7 +324,7 @@ NOTE: `user_dn_templates` is required to operate in user template mode and `user
[options="header"]
|======
| Name | Default | Description
| `shield.authz.store.file.roles` | `ES_HOME/config/shield/users/roles.yml` | The location of the roles definition file.
| `shield.authz.store.file.roles` | `CONFIG_DIR/shield/users/roles.yml` | The <<ref-shield-files-location,location>> of the roles definition file.
|======
[[ref-ssl-tls-settings]]
@ -392,13 +392,18 @@ NOTE: `user_dn_templates` is required to operate in user template mode and `user
The Shield security plugin uses the following files:
* `config/shield/roles.yml` defines the roles in use on the cluster (read more <<defining-roles,here>>).
* `config/shield/users` defines the hashed passwords for users on the cluster (read more <<users-file,here>>).
* `config/shield/users_roles` defines the role assignments for users on the cluster (read more <<users_defining-roles,here>>).
* `config/shield/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for
* `CONFIG_DIR/shield/roles.yml` defines the roles in use on the cluster (read more <<defining-roles,here>>).
* `CONFIG_DIR/shield/users` defines the hashed passwords for users on the cluster (read more <<users-file,here>>).
* `CONFIG_DIR/shield/users_roles` defines the role assignments for users on the cluster (read more <<users_defining-roles,here>>).
* `CONFIG_DIR/shield/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for
LDAP and Active Directory groups and users and PKI users to be mapped to roles (read more <<ldap-role-mapping,here>>).
* `config/shield/logging.yml` contains audit information (read more <<logging-file,here>>).
* `config/shield/system_key` holds a cluster secret key used for message authentication. For more information, see <<enable-message-authentication,Enabling Message Authentication>>.
* `CONFIG_DIR/shield/logging.yml` contains audit information (read more <<logging-file,here>>).
* `CONFIG_DIR/shield/system_key` holds a cluster secret key used for message authentication. For more information, see <<enable-message-authentication,Enabling Message Authentication>>.
[[ref-shield-files-location]]
IMPORTANT: Any files that Shield uses must be stored in the Elasticsearch {ref}/setup-dir-layout.html#setup-dir-layout[configuration directory].
Elasticsearch runs with restricted permissions and is only permitted to read from the locations configured in the directory
layout for enhanced security.
Several of these files are in the YAML format. When you edit these files, be aware that YAML is indentation-level
sensitive and indentation errors can lead to configuration errors. Avoid the tab character to set indentation levels,

View File

@ -4,10 +4,10 @@
[float]
[[version-compatibility]]
=== Version Compatibility
Shield 2.x is compatible with:
Shield {version} is compatible with:
* Elasticsearch: 2.x
* License plugin: 2.x
* Elasticsearch: {version}
* License plugin: {version}
[float]
[[upgrade-instructions]]
@ -41,6 +41,11 @@ version of Shield. We recommend copying the changes listed below to your `roles.
[[changelist]]
=== Change List
[float]
==== 2.0.0-beta1
This release is primarily for compatibility with Elasticsearch 2.0.0-beta1.
[float]
==== 1.3.2

View File

@ -1,133 +1,108 @@
[[ssl-tls]]
=== Setting Up SSL/TLS on a Cluster
Shield allows for the installation of X.509 certificates that establish trust between nodes. When a client connects to a
node using SSL or TLS, the node will present its certificate to the client, and then as part of the handshake process the
node will prove that it owns the private key linked with the certificate. The client will then determine if the node's
certificate is valid, trusted, and matches the hostname or IP address it is trying to connect to. A node also acts as a
client when connecting to other nodes in the cluster, which means that every node must trust all of the other nodes in
the cluster.
Shield enables you to encrypt traffic to and from nodes in your Elasticsearch cluster. Connections
are secured using Transport Layer Security (TLS).
The certificates used for SSL and TLS can be signed by a certificate authority (CA) or self-signed. The type of signing
affects how a client will trust these certificates. Self-signed certificates must be trusted individually, which means
that each node must have every other node's certificate installed. Certificates signed by a CA, can be trusted through
validation that the CA signed the certificate. This means that every node will only need the signing CA certificate
installed to trust the other nodes in the cluster.
WARNING: Nodes that do not have encryption enabled send passwords in plain text.
The best practice with Shield is to use certificates signed by a CA. Self-signed certificates introduce a lot of
overhead as they require each client to trust every self-signed certificate. Self-signed certificates also limit
the elasticity of Elasticsearch as adding a new node to the cluster requires a restart of every node after
installing the new node's certificate. This overhead is not present when using a CA as a new node only needs a
certificate signed by the CA to establish trust with the other nodes in the cluster.
To enable encryption, you need to perform the following steps on each node in the cluster:
Many organizations have a CA to sign certificates for each node. If not, see
<<certificate-authority, Setting Up a Certificate Authority>> for instructions on setting up a CA.
. <<installing-node-certificates, Install an X.509 certificate>>.
The following steps will need to be repeated on each node to setup SSL/TLS:
. <<configure-ssl, Configure the node>> to:
.. Identify itself using its signed certificate.
.. Enable SSL on the transport and HTTP layers.
.. Disable multicast.
* Install the CA certificate in the node's keystore
* Generate a private key and certificate for the node
* Create a signing request for the new node certificate
* Send the signing request to the CA
* Install the newly signed certificate in the node keystore
. Restart Elasticsearch.
The steps in this procedure use the <<keytool,`keytool`>> command-line utility.
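keytool ships with the JDK, so a quick, hedged sanity check before you start is simply to confirm it is on your `PATH` (the exact output depends on your JDK version):

[source,shell]
--------------------------------------------------
keytool -help
--------------------------------------------------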
[[installing-node-certificates]]
==== Installing Node Certificates
WARNING: Nodes that do not have SSL/TLS encryption enabled send passwords in plain text.
Node certificates should be signed by a certificate authority (CA) that is trusted by every node
in the cluster. You can use a third-party CA, your organization's existing CA, or
<<certificate-authority, set up a certificate authority>> specifically for signing node certificates.
[float]
==== Set up a keystore
When a client connects to a node using SSL/TLS, the node presents its certificate to the
client and proves that it owns the private key linked with the certificate. The client then
determines if the node's certificate is valid, trusted, and matches the hostname or IP address
it is trying to connect to. A node acts as a client when connecting to other nodes in the cluster,
so every node must trust all of the other nodes in the cluster.
These instructions show how to place a CA certificate and a certificate for the node in a single keystore.
You can optionally store the CA certificate in a separate truststore. The configuration for this is
discussed later in this section.
NOTE: While it is technically possible to use self-signed certificates, we strongly recommend using certificates signed by a CA to establish trust between nodes. Self-signed certificates must be trusted individually, which means that each node must have every other node's certificate installed. If you add a node to the cluster, you have to install the new node's self-signed certificate on all of the existing nodes and restart them. When you use CA-signed certificates, the existing nodes just need to trust the CA used to sign the new node's certificate. (You should use the same CA to sign all of your node certificates.)
First obtain the root CA certificate from your certificate authority. This certificate is used to verify that
any node certificate has been signed by the CA. Store this certificate in a keystore as a *trusted certificate*. With
the simplest configuration, Shield uses a keystore with a trusted certificate as a truststore.
To install a signed certificate, you need to:
The following shows how to create a keystore from a PEM encoded certificate. A _JKS file_ is a Java Key Store file.
It securely stores certificates.
. <<private-key, Create a keystore and generate a certificate for the node>>.
. <<generate-csr, Create a certificate signing request (CSR)>>.
. <<send-csr, Send the certificate to your CA for signing>>.
. <<install-signed-cert, Add the signed certificate to the node's keystore>>.
[source,shell]
--------------------------------------------------
keytool -importcert \
-keystore /home/es/config/node01.jks \
-file /Users/Download/cacert.pem <1>
--------------------------------------------------
<1> The Certificate Authority's own certificate.
The keytool command will prompt you for a password, which will be used to protect the integrity of the keystore. You
will need to remember this password as it will be needed for all further interactions with the keystore.
The keystore needs an update when the CA expires.
[float]
[[private-key]]
==== Generate a node private key and certificate
This step creates a private key and certificate that the node will use to identify itself. This step must
be done for every node.
`keytool -genkey` can generate a private key and certificate for your node. The following is a typical usage:
===== Creating a Keystore and Generating a Certificate
To create a keystore and generate a node certificate:
. Create a node keystore and import your CA's certificate with https://docs.oracle.com/javase/8/docs/technotes/tools/unix/keytool.html[Java Keytool]. This configures the node to trust certificates signed by the CA. For example, the following command creates a keystore for `node01` and imports the CA certificate `cacert.pem`.
+
[source,shell]
--------------------------------------------------
keytool -genkey \
-alias node01 \ <1>
-keystore node01.jks \ <2>
-keyalg RSA \
-keysize 2048 \
-validity 712 \
-ext san=dns:node01.example.com,ip:192.168.1.1 <3>
keytool -importcert -keystore node01.jks -file cacert.pem -alias my_ca
--------------------------------------------------
<1> An alias for this public/private key-pair.
<2> The keystore for this node -- will be created.
<3> The `SubjectAlternativeName` list for this host. The '-ext' parameter is optional and can be used to specify
additional DNS names and IP Addresses that the certificate will be valid for. Multiple DNS and IP entries can
be specified by separating each entry with a comma. If this option is used, *all* names and ip addresses must
be specified in this list.
This will create an RSA public/private key-pair with a key size of 2048 bits and store them in the `node01.jks` file.
The keystore is protected with the password of `myPass`. The `712` argument specifies the number of days that the
certificate is valid for -- two years, in this example.
The tool will prompt you for information to include in the certificate.
+
The Java keystore file (.jks) securely stores certificates for the node. The CA cert must be a
PEM encoded certificate.
+
When you create a keystore, you are prompted to set a password. This password protects the
integrity of the keystore. You need to provide it whenever you interact with the keystore.
+
IMPORTANT: When the CA certificate expires, you must update the node's keystore with the new CA
certificate.
+
You can also store the CA certificate in a separate truststore. For more
information, see <<create-truststore, Configuring a Truststore>>.
. Generate a private key and certificate for the node with Java Keytool. For example, the following
command creates a key and certificate for `node01`:
+
[source,shell]
--------------------------------------------------
keytool -genkey -alias node01 -keystore node01.jks -keyalg RSA -keysize 2048 -validity 712 -ext san=dns:node01.example.com,ip:192.168.1.1
--------------------------------------------------
+
This command creates an RSA private key with a key size of 2048 bits and a public certificate that
is valid for 712 days. The key and certificate are stored in the `node01.jks` keystore.
+
The `san` value specifies all alternative names for the node. The generated certificate is valid for the DNS names and IP addresses specified as alternative names. You can specify multiple DNS or IP address entries as a comma-separated list.
+
[IMPORTANT]
.Specifying the Node Identity
==========================
An Elasticsearch node with Shield will verify the hostname contained
in the certificate of each node it connects to. Therefore it is important
that each node's certificate contains the hostname or IP address used to connect
to the node. Hostname verification can be disabled, for more information see
the <<ref-ssl-tls-settings, Configuration Parameters for TLS/SSL>> section.
With SSL/TLS enabled, when node A connects to node B, node A normally verifies the identity of
node B by checking the identity information specified in node B's certificate. This means that you
must include node identity information when you create a node's certificate.
The recommended way to specify the node identity is by providing all names and
IP addresses of a node as a `SubjectAlternativeName` list using the `-ext` option.
When using a commercial CA, internal DNS names and private IP addresses will not
be accepted as a `SubjectAlternativeName` due to https://cabforum.org/internal-names/[security concerns];
only publicly resolvable DNS names and IP addresses will be accepted. The use of an
internal CA is the most secure option for using private DNS names and IP addresses,
as it allows for node identity to be specified and verified. If you must use a commercial
CA and private DNS names or IP addresses, you will not be able to include the node
identity in the certificate and will need to disable <<ref-ssl-tls-settings, hostname verification>>.
The recommended way to specify the node identity when creating a certificate is to specify the
`-ext` option and list all of the node's names and IP addresses in the `san`
(Subject Alternative Name) attribute.
Another way to specify node identity is by using the `CommonName` attribute
of the certificate. The first prompt from keytool, `What is your first and last name?`,
is asking for the `CommonName` attribute of the certificate. When using the `CommonName` attribute
for node identity, a DNS name must be used. The rest of the prompts by keytool are for information only.
If you use a commercial CA, the DNS names and IP addresses used to identify a node must be publicly resolvable. Internal DNS names and private IP addresses are not accepted due to
https://cabforum.org/internal-names/[security concerns].
If you need to use private DNS names and IP addresses, using an internal CA is the most secure
option. It enables you to specify node identities and ensure node identities are verified when
nodes connect. If you must use a commercial CA and private DNS names or IP addresses, you cannot
include the node identity in the certificate, so the only option is to disable
<<ref-ssl-tls-settings, hostname verification>>.
==========================
At the end, you will be prompted to optionally enter a password. The command line argument specifies the password for
the keystore. This prompt is asking if you want to set a different password that is specific to this certificate.
Doing so may provide some incremental improvement to security.
Here is a sample interaction with `keytool -genkey`
+
When you run `keytool -genkey`, Keytool prompts you for the information needed to populate the
node's distinguished name that's stored in the certificate. For example:
+
[source, shell]
--------------------------------------------------
What is your first and last name?
[Unknown]: node01.example.com <1>
[Unknown]: Elasticsearch node01 <1>
What is the name of your organizational unit?
[Unknown]: test
What is the name of your organization?
@ -138,187 +113,143 @@ What is the name of your State or Province?
[Unknown]: Amsterdam
What is the two-letter country code for this unit?
[Unknown]: NL
Is CN=node01.example.com, OU=test, O=elasticsearch, L=Amsterdam, ST=Amsterdam, C=NL correct?
Is CN=Elasticsearch node01, OU=test, O=elasticsearch, L=Amsterdam, ST=Amsterdam, C=NL correct?
[no]: yes
Enter key password for <mydomain>
Enter key password for <node01> <2>
(RETURN if same as keystore password):
--------------------------------------------------
<1> The DNS name or hostname of the node must be used here if you do not specify a `SubjectAlternativeName` list using the
`-ext` option.
Now you have a certificate and private key stored in `node01.jks`.
<1> Provides information about the node that this certificate is intended for. In the past, this field specified the node's identity using a DNS name, but that behavior has been deprecated.
<2> If you don't specify a password for the certificate, the keystore password is used.
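Before creating the signing request, a hedged way to confirm that the keystore now holds both the CA certificate and the node key (file name and aliases as used above; you are prompted for the keystore password):

[source,shell]
--------------------------------------------------
keytool -list -keystore node01.jks
--------------------------------------------------

You should see the `my_ca` entry listed as a trusted certificate entry and the `node01` entry listed as a private key entry.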
[float]
[[generate-csr]]
==== Create a certificate signing request
===== Creating a Certificate Signing Request
The next step is to get the node certificate signed by your CA. To do this you must generate a _Certificate Signing
Request_ (CSR) with the `keytool -certreq` command:
A node's certificate needs to be signed by a trusted CA for the certificate to be trusted. To get a certificate signed, you need to create a certificate signing request (CSR) and send it to your CA.
To create a CSR with Java Keytool, use the `keytool -certreq` command. You specify the same alias, keystore, key algorithm, and DNS names and IP addresses that you used when you created the node certificate. Specify where you want to store the CSR with the `-file` option.
[source, shell]
--------------------------------------------------
keytool -certreq \
-alias node01 \ <1>
-keystore node01.jks \
-file node01.csr \
-keyalg rsa \
-ext san=dns:node01.example.com,ip:192.168.1.1 <2>
keytool -certreq -alias node01 -keystore node01.jks -file node01.csr -keyalg rsa -ext san=dns:node01.example.com,ip:192.168.1.1
--------------------------------------------------
<1> The same `alias` that you specified when creating the public/private key-pair in <<private-key>>.
<2> The `SubjectAlternativeName` list for this host. The `-ext` parameter is optional and can be used to specify
additional DNS names and IP Addresses that the certificate will be valid for. Multiple DNS and IP entries can
be specified by separating each entry with a comma. If this option is used, *all* names and ip addresses must
be specified in this list.
[float]
[[send-csr]]
===== Send the Signing Request
The resulting file -- `node01.csr` -- is your _Certificate Signing Request_, or _CSR file_.
To get a signed certificate, send the generated CSR file to your CA. The CA will sign it and send
you the signed version of the certificate.
NOTE: If you are running your own CA, see <<sign-csr, Signing CSRs>> for signing instructions.
[float]
===== Send the signing request
[[install-signed-cert]]
===== Install the Signed Certificate
Send the CSR file to the Certificate Authority for signing. The Certificate Authority will sign the certificate and
return a signed version of the certificate. See <<sign-csr>> if you are running your own Certificate Authority.
NOTE: When running multiple nodes on the same host, the same signed certificate can be used on each node or a unique
certificate can be requested per node if your CA supports multiple certificates with the same common name.
[float]
==== Install the newly signed certificate
Replace the existing unsigned certificate by importing the new signed certificate from your CA into the node keystore:
To install the signed certificate, use `keytool -importcert` to add it to the node's keystore. You
specify the same alias and keystore that you used when you created the node certificate.
[source, shell]
--------------------------------------------------
keytool -importcert \
-keystore node01.jks \
-file node01-signed.crt \ <1>
-alias node01 <2>
keytool -importcert -keystore node01.jks -file node01-signed.crt -alias node01
--------------------------------------------------
<1> This name of the signed certificate file that you received from the CA.
<2> The `alias` must be the same as the alias that you used in <<private-key>>.
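After the import, a hedged way to confirm that the signed certificate is now stored under the node's alias (file name and alias as used above):

[source,shell]
--------------------------------------------------
keytool -list -v -keystore node01.jks -alias node01
--------------------------------------------------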
[NOTE]
==========================
If you attempt to import a PEM-encoded certificate that contains extra text headers, you might get
the error: `java.security.cert.CertificateParsingException: invalid DER-encoded certificate data`.
Use the following `openssl` command to remove the extra headers and then use `keytool` to import
the certificate.
NOTE: keytool confuses some PEM-encoded certificates with extra text headers as DER-encoded certificates, giving
this error: `java.security.cert.CertificateParsingException: invalid DER-encoded certificate data`. The text information
can be deleted from the certificate. The following openssl command will remove the text headers:
[source, shell]
--------------------------------------------------
openssl x509 -in node01-signed.crt -out node01-signed-noheaders.crt
--------------------------------------------------
==========================
[float]
==== Configure the keystores and enable SSL
[[enable-ssl]]
==== Enabling SSL in the Node Configuration
NOTE: All ssl related node settings that are considered to be highly sensitive and therefore are not exposed via the
{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
Once you have added the signed certificate to the node's keystore, you need to modify the node
configuration to enable SSL.
NOTE: All SSL/TLS related node settings are considered highly sensitive and are therefore
not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
You need to configure the node to enable SSL, identify itself using
its signed certificate, and verify the identity of incoming connections.
The settings below should be added to the main `elasticsearch.yml` config file.
[float]
===== Node identity
The `node01.jks` contains the certificate that `node01` will use to identify
itself to other nodes in the cluster, to transport clients, and to HTTPS
clients. Add the following to `elasticsearch.yml`:
[[configure-ssl]]
To enable SSL, make the following changes in `elasticsearch.yml`:
. Specify the location of the node's keystore and the password(s) needed to access the node's
certificate. For example:
+
[source, yaml]
--------------------------------------------------
shield.ssl.keystore.path: /home/es/config/node01.jks <1>
shield.ssl.keystore.password: myPass <2>
shield.ssl.keystore.key_password: myKeyPass <3>
--------------------------------------------------
<1> The full path to the node keystore file.
<2> The password used to decrypt the `node01.jks` keystore.
If you specified a different password than the keystore password when executing the `keytool -genkey` command, you will
need to specify that password in the `elasticsearch.yml` configuration file:
[source, yaml]
--------------------------------------------------
shield.ssl.keystore.key_password: myKeyPass <1>
--------------------------------------------------
<1> The password entered at the end of the `keytool -genkey` command
[float]
[[create-truststore]]
===== Optional truststore configuration
The truststore holds the trusted CA certificates. Shield will use the keystore as the truststore
by default. You can optionally provide a separate path for the truststore. In this case, Shield
will use the keystore for the node's private key and the configured truststore for trusted certificates.
First obtain the CA certificates that will be trusted. Each of these certificates need to be imported into a truststore
by running the following command for each CA certificate:
[source,shell]
--------------------------------------------------
keytool -importcert \
-keystore /home/es/config/truststore.jks \ <1>
-file /Users/Download/cacert.pem <2>
--------------------------------------------------
<1> The full path to the truststore file. If the file does not exist it will be created.
<2> A trusted CA certificate.
The keytool command will prompt you for a password, which will be used to protect the integrity of the truststore. You
will need to remember this password as it will be needed for all further interactions with the truststore.
Add the following to `elasticsearch.yml`:
[source, yaml]
--------------------------------------------------
shield.ssl.truststore.path: /home/es/config/truststore.jks <1>
shield.ssl.truststore.password: myPass <2>
--------------------------------------------------
<1> The full path to the truststore file.
<2> The password used to decrypt the `truststore.jks` keystore.
[float]
[[ssl-transport]]
==== Enable SSL on the transport layer
Enable SSL on the transport networking layer to ensure that communication between nodes is encrypted. Add the following
value to the `elasticsearch.yml` configuration file:
<2> The password used to access the keystore.
<3> The password used to access the certificate. This is only required if you specified a separate
certificate password when generating the certificate.
. Enable SSL on the transport networking layer to ensure that communication between nodes is
encrypted:
+
[source, yaml]
--------------------------------------------------
shield.transport.ssl: true
--------------------------------------------------
+
NOTE: Transport clients can only connect to the cluster with a valid username and password even if
this setting is disabled.
Regardless of this setting, transport clients can only connect to the cluster with a valid username and password.
[float]
[[disable-multicast]]
==== Disable multicast
Multicast {ref}/modules-discovery.html[discovery] is
not supported with shield. To properly secure node communications, disable multicast by setting the following values
in the `elasticsearch.yml` configuration file:
. Enable SSL on the HTTP layer to ensure that communication between HTTP clients and the cluster is encrypted:
+
[source, yaml]
--------------------------------------------------
shield.http.ssl: true
--------------------------------------------------
+
NOTE: HTTP clients can only connect to the cluster with a valid username and password even if this
setting is disabled.
. Disable {ref}/modules-discovery.html[multicast discovery]:
+
[source, yaml]
--------------------------------------------------
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: ["node01:9300", "node02:9301"]
--------------------------------------------------
You can learn more about unicast configuration in the {ref}/modules-discovery.html[Zen Discovery] documentation.
. Restart Elasticsearch so these configuration changes take effect.
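+
After the restart, one hedged way to confirm that the HTTP layer is now serving TLS is to query the cluster over HTTPS (the host, port, and user here are assumptions; point `--cacert` at the CA certificate your client should trust):
+
[source,shell]
--------------------------------------------------
curl --cacert cacert.pem -u es_admin 'https://node01.example.com:9200/'
--------------------------------------------------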
[float]
[[ssl-http]]
==== Enable SSL on the HTTP layer
[[create-truststore]]
==== Configuring a Separate Truststore
You can store trusted CA certificates in a node's keystore, or create a separate truststore for CA
certificates.
SSL should be enabled on the HTTP networking layer to ensure that communication between HTTP clients and the cluster is
encrypted:
To use a separate truststore:
. Create a node truststore and import the CA certificate(s) you want to trust with Java Keytool. For example, the following command imports the CA certificate `cacert.pem` into `truststore.jks`. If the specified truststore doesn't exist, it is created.
+
[source,shell]
--------------------------------------------------
keytool -importcert -keystore /home/es/config/truststore.jks -file /Users/Download/cacert.pem
--------------------------------------------------
+
When you create a truststore, you are prompted to set a password. This password protects the
integrity of the truststore. You need to provide it whenever you interact with the truststore.
. In `elasticsearch.yml`, specify the location of the node's truststore and the password needed to
access it. For example:
+
[source, yaml]
--------------------------------------------------
shield.http.ssl: true
shield.ssl.truststore.path: /home/es/config/truststore.jks <1>
shield.ssl.truststore.password: myPass <2>
--------------------------------------------------
Regardless of this setting, HTTP clients can only connect to the cluster with a valid username and password.
Congratulations! At this point, you have a node with encryption enabled for both HTTPS and the transport layers.
Your node will correctly present its certificate to other nodes or clients when connecting. There are optional,
more advanced features you may use to further configure or protect your node. They are described in the following
paragraphs.
<1> The full path to the truststore file.
<2> The password needed to access the truststore.
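To double-check which CA certificates the truststore contains, a hedged `keytool -list` against the path configured above:

[source,shell]
--------------------------------------------------
keytool -list -keystore /home/es/config/truststore.jks
--------------------------------------------------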

View File

@ -64,7 +64,7 @@ shield:
| `group_search.base_dn` | no | Specifies the context to search for groups in which the user has membership. The default value for this element is the root of the Active Directory domain.
| `group_search.scope` | no | Specifies whether the group search should be `sub_tree` (default), `one_level` or `base`. `one_level` searches for groups directly contained within the `base_dn`. `sub_tree` searches all objects contained under `base_dn`. `base` specifies that the `base_dn` is a group object, and that it is the only group considered.
| `unmapped_groups_as_roles` | no | When set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. The default value is `false`.
| `files.role_mapping` | no | Specifies the path and file name for the <<ad-role-mapping, YAML role mapping configuration file>>. By default it is `ES_HOME/config/shield/users/role_mapping.yml`.
| `files.role_mapping` | no | Specifies the <<ref-shield-files-location,location>> for the <<ad-role-mapping, YAML role mapping configuration file>>. By default it is `CONFIG_DIR/shield/users/role_mapping.yml`.
| `follow_referrals` | no | Boolean value that specifies whether Shield should follow referrals returned by the LDAP server. Referrals are URLs returned by the server that are to be used to continue the LDAP operation (e.g. search). Default is `true`.
| `hostname_verification` | no | When set to `true`, hostname verification will be performed when connecting to a LDAP server. The hostname or IP address used in the `url` must match one of the names in the certificate or the connection will not be allowed. Defaults to `true`.
| `cache.ttl` | no | Specifies the time-to-live for cached user entries (a user and its credentials will be cached for this configured period of time). Defaults to `20m` (use the standard Elasticsearch {ref}/common-options.html#time-units[time units])

View File

@ -29,8 +29,8 @@ shield:
| `type` | yes | Indicates the realm type and must be set to `esusers`.
| `order` | no | Indicates the priority of this realm within the realm chain. Realms with lower order will be consulted first. Although not required, it is highly recommended to explicitly set this value when multiple realms are configured. Defaults to `Integer.MAX_VALUE`.
| `enabled` | no | Indicates whether this realm is enabled/disabled. Provides an easy way to disable realms in the chain without removing their configuration. Defaults to `true`.
| `files.users` | no | Points to the location of the `users` file where the users and their passwords are stored. By default, it is `ES_HOME/config/shield/users`.
| `files.users_roles` | no | Points to the location of the `users_roles` file where the users and their roles are stored. By default, it is `ES_HOME/config/shield/users_roles`.
| `files.users` | no | Points to the <<ref-shield-files-location,location>> of the `users` file where the users and their passwords are stored. By default, it is `CONFIG_DIR/shield/users`.
| `files.users_roles` | no | Points to the <<ref-shield-files-location,location>> of the `users_roles` file where the users and their roles are stored. By default, it is `CONFIG_DIR/shield/users_roles`.
| `cache.ttl` | no | Specifies the time-to-live for cached user entries (a user and its credentials will be cached for this configured period of time). Defaults to `20m` (use the standard Elasticsearch {ref}/common-options.html#time-units[time units]).
| `cache.max_users` | no | Specifies the maximum number of user entries that can live in the cache at a given time. Defaults to 100,000.
| `cache.hash_algo` | no | (Expert Setting) Specifies the hashing algorithm that will be used for the in-memory cached user credentials (see <<esusers-cache-hash-algo,here>> for possible values).

View File

@ -101,7 +101,7 @@ shield:
| `unmapped_groups_as_roles` | no | When set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. The default value is `false`.
| `connect_timeout` | no | The timeout period for establishing an LDAP connection. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to "5s" - for 5 seconds
| `read_timeout` | no | The timeout period for an LDAP operation. An `s` at the end indicates seconds, or `ms` indicates milliseconds. Defaults to "5s" - for 5 seconds
| `files.role_mapping` | no | Specifies the path and file name for the <<ldap-role-mapping, YAML role mapping configuration file>>. By default, it is `ES_HOME/config/shield/role_mapping.yml`.
| `files.role_mapping` | no | Specifies the <<ref-shield-files-location,location>> for the <<ldap-role-mapping, YAML role mapping configuration file>>. By default, it is `CONFIG_DIR/shield/role_mapping.yml`.
| `follow_referrals` | no | Boolean value that specifies whether Shield should follow referrals returned by the LDAP server. Referrals are URLs returned by the server that are to be used to continue the LDAP operation (e.g. search). Default is `true`.
| `hostname_verification` | no | When set to `true`, hostname verification will be performed when connecting to a LDAP server. The hostname or IP address used in the `url` must match one of the names in the certificate or the connection will not be allowed. Defaults to `true`.
| `cache.ttl` | no | Specifies the time-to-live for cached user entries (a user and its credentials will be cached for this configured period of time). Defaults to `20m` (use the standard Elasticsearch {ref}/common-options.html#time-units[time units]).

View File

@ -73,6 +73,6 @@ shield:
| `truststore.path` | no | The path of a truststore to use. The default truststore is the one defined by <<ref-ssl-tls-settings,SSL/TLS settings>>
| `truststore.password` | no | The password to the truststore. Must be provided if `truststore.path` is set.
| `truststore.algorithm` | no | Algorithm for the truststore. Default is `SunX509`
| `files.role_mapping` | no | Specifies the path and file name for the <<pki-role-mapping, YAML role mapping configuration file>>. By default, it is `ES_HOME/config/shield/role_mapping.yml`.
| `files.role_mapping` | no | Specifies the <<ref-shield-files-location,location>> for the <<pki-role-mapping, YAML role mapping configuration file>>. By default, it is `CONFIG_DIR/shield/role_mapping.yml`.
|=======================

Some files were not shown because too many files have changed in this diff.