Merge branch 'master' into reporting-fix/use-absolute-times
Original commit: elastic/x-pack-elasticsearch@bdc85eb9fb
commit 38f87b59be
@@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

@@ -22,7 +23,6 @@ import org.junit.Before;
import org.junit.BeforeClass;

import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;

@@ -83,12 +83,9 @@ public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase {

    @Before
    public void enableExporter() throws Exception {
        InetSocketAddress httpAddress = randomFrom(httpAddresses());
        URI uri = new URI("https", null, httpAddress.getHostString(), httpAddress.getPort(), "/", null, null);

        Settings exporterSettings = Settings.builder()
                .put("xpack.monitoring.exporters._http.enabled", true)
                .put("xpack.monitoring.exporters._http.host", uri.toString())
                .put("xpack.monitoring.exporters._http.host", "https://" + NetworkAddress.format(randomFrom(httpAddresses())))
                .build();
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
    }
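The rewritten setup builds the exporter host by prefixing a scheme onto NetworkAddress.format(...) instead of assembling a java.net.URI. A minimal sketch of the resulting host string, assuming a hypothetical 127.0.0.1:9200 address and an illustrative class name:

import java.net.InetSocketAddress;

import org.elasticsearch.common.network.NetworkAddress;

public class HostFormatExample {
    public static void main(String[] args) {
        InetSocketAddress httpAddress = new InetSocketAddress("127.0.0.1", 9200);
        // NetworkAddress.format renders "host:port" without any URI escaping,
        // so the test only needs to prepend the scheme.
        String host = "https://" + NetworkAddress.format(httpAddress);
        System.out.println(host); // https://127.0.0.1:9200
    }
}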
@@ -70,6 +70,8 @@
  - match: { _id: "watch_with_groovy_closure" }

  - do:
      warnings:
        - '[groovy] scripts are deprecated, use [painless] scripts instead'
      xpack.watcher.execute_watch:
        id: "watch_with_groovy_closure"
        body: >

@@ -118,6 +120,8 @@
              { "status": "red", "@timestamp": "2005-01-01T00:01:55" }

  - do:
      warnings:
        - '[groovy] scripts are deprecated, use [painless] scripts instead'
      xpack.watcher.execute_watch:
        id: "watch_with_groovy_closure"
        body: >

@@ -16,7 +16,7 @@
# Creates indices with old versions of elasticsearch. These indices are used by x-pack plugins like security
# to test if the import of metadata that is stored in elasticsearch indexes works correctly.
# This tool will start a node on port 9200/9300. If a node is already running on that port then the script will fail.
# Currently this script can only deal with versions >=2.3X and < 5.0. Needs more work for versions before or after.
# Currently this script can only deal with versions >=2.0.0 and < 5.0. Needs more work for versions before or after.
#
# Run from x-plugins root directory like so:
# python3 ./elasticsearch/x-dev-tools/create_bwc_indexes.py 2.3.4

@@ -50,6 +50,7 @@ try:
  from elasticsearch import Elasticsearch
  from elasticsearch.exceptions import ConnectionError
  from elasticsearch.exceptions import TransportError
  from elasticsearch.exceptions import NotFoundError
  from elasticsearch.client import IndicesClient
except ImportError as e:
  print('Can\'t import elasticsearch please install `sudo pip3 install elasticsearch`')

@@ -80,7 +81,10 @@ def start_node(version, release_dir, data_dir):
  return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

def install_plugin(version, release_dir, plugin_name):
  run_plugin(version, release_dir, 'install', [plugin_name])
  args = [plugin_name]
  if parse_version(version) >= parse_version('2.2.0'):
    args = [plugin_name, '--batch']
  run_plugin(version, release_dir, 'install', args)

def remove_plugin(version, release_dir, plugin_name):
  run_plugin(version, release_dir, 'remove', [plugin_name])

@@ -96,9 +100,8 @@ def create_client():
      client = Elasticsearch([{'host': 'localhost', 'port': 9200, 'http_auth':'es_admin:0123456789'}])
      health = client.cluster.health(wait_for_nodes=1)
      return client
    except Exception as e:
      logging.info('got exception while waiting for cluster' + str(e))
      pass
    except ConnectionError:
      logging.info('Not started yet...')
    time.sleep(1)
  assert False, 'Timed out waiting for node for %s seconds' % timeout

@@ -113,11 +116,17 @@ def generate_security_index(client, version):
    "roles" : [ "bwc_test_role" ]
  }

  response = requests.put('http://localhost:9200/_shield/user/bwc_test_user', auth=('es_admin', '0123456789'), data=json.dumps(body))
  logging.info('put user response: ' + response.text)
  if (response.status_code != 200) :
  while True:
    response = requests.put('http://localhost:9200/_shield/user/bwc_test_user', auth=('es_admin', '0123456789'), data=json.dumps(body))
    logging.info('put user response: ' + response.text)
    if response.status_code == 200:
      break
    else:
      if 'service has not been started' in response.text:
        continue
      raise Exception('PUT http://localhost:9200/_shield/role/bwc_test_role did not succeed!')

  # add a role
  body = {
    "cluster": ["all"],

@@ -154,6 +163,154 @@ def generate_security_index(client, version):
  health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.security')
  assert health['timed_out'] == False, 'cluster health timed out %s' % health

# this adds a couple of watches and waits for the watch_history to accumulate some results
def generate_watcher_index(client, version):
  logging.info('Adding a watch')
  body = {
    "trigger" : {
      "schedule": {
        "interval": "1s"
      }
    },
    "input" : {
      "search" : {
        "timeout": "100s",
        "request" : {
          "indices" : [ ".watches" ],
          "body" : {
            "query" : { "match_all" : {}},
            "size": 1
          },
        }
      }
    },
    "condition" : {
      "always" : {}
    },
    "throttle_period": "1s",
    "actions" : {
      "index_payload" : {
        "transform" : {
          "search" : {
            "request" : {
              "body" : { "size": 1, "query" : { "match_all" : {} }}
            },
            "timeout": "100s"
          }
        },
        "index" : {
          "index" : "bwc_watch_index",
          "doc_type" : "bwc_watch_type",
          "timeout": "100s"
        }
      }
    }
  }
  response = requests.put('http://localhost:9200/_watcher/watch/bwc_watch', auth=('es_admin', '0123456789'), data=json.dumps(body))
  logging.info('PUT watch response: ' + response.text)
  if (response.status_code != 201) :
    raise Exception('PUT http://localhost:9200/_watcher/watch/bwc_watch did not succeed!')

  logging.info('Adding a watch with "fun" throttle periods')
  body = {
    "trigger" : {
      "schedule": {
        "interval": "1s"
      }
    },
    "condition" : {
      "never" : {}
    },
    "throttle_period": "100s",
    "actions" : {
      "index_payload" : {
        "throttle_period": "100s",
        "transform" : {
          "search" : {
            "request" : {
              "body" : { "size": 1, "query" : { "match_all" : {} }}
            }
          }
        },
        "index" : {
          "index" : "bwc_watch_index",
          "doc_type" : "bwc_watch_type"
        }
      }
    }
  }
  response = requests.put('http://localhost:9200/_watcher/watch/bwc_throttle_period', auth=('es_admin', '0123456789'), data=json.dumps(body))
  logging.info('PUT watch response: ' + response.text)
  if (response.status_code != 201) :
    raise Exception('PUT http://localhost:9200/_watcher/watch/bwc_throttle_period did not succeed!')

  if parse_version(version) < parse_version('2.3.0'):
    logging.info('Skipping watch with a funny read timeout because email attachment is not supported by this version')
  else:
    logging.info('Adding a watch with a funny read timeout')
    body = {
      "trigger" : {
        "schedule": {
          "interval": "100s"
        }
      },
      "condition": {
        "never": {}
      },
      "actions": {
        "work": {
          "email": {
            "to": "email@domain.com",
            "subject": "Test Kibana PDF report",
            "attachments": {
              "test_report.pdf": {
                "http": {
                  "content_type": "application/pdf",
                  "request": {
                    "read_timeout": "100s",
                    "scheme": "https",
                    "host": "example.com",
                    "path":"{{ctx.metadata.report_url}}",
                    "port": 8443,
                    "auth": {
                      "basic": {
                        "username": "Aladdin",
                        "password": "open sesame"
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    response = requests.put('http://localhost:9200/_watcher/watch/bwc_funny_timeout', auth=('es_admin', '0123456789'), data=json.dumps(body))
    logging.info('PUT watch response: ' + response.text)
    if (response.status_code != 201) :
      raise Exception('PUT http://localhost:9200/_watcher/watch/bwc_funny_timeout did not succeed!')

  # wait to accumulate some watches
  logging.info('Waiting for watch results index to fill up...')
  for attempt in range(1, 31):
    try:
      response = client.search(index="bwc_watch_index", body={"query": {"match_all": {}}})
      logging.info('(' + str(attempt) + ') Got ' + str(response['hits']['total']) + ' hits and want 10...')
      if response['hits']['total'] >= 10:
        break
    except NotFoundError:
      logging.info('(' + str(attempt) + ') Not found, retrying')
    time.sleep(1)

  health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.watches')
  assert health['timed_out'] == False, 'cluster health timed out %s' % health
  health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.watch_history*')
  assert health['timed_out'] == False, 'cluster health timed out %s' % health
  health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='bwc_watch_index')
  assert health['timed_out'] == False, 'cluster health timed out %s' % health


def compress_index(version, tmp_dir, output_dir):
  compress(tmp_dir, output_dir, 'x-pack-%s.zip' % version, 'data')

@@ -232,50 +389,52 @@ def main():
  logging.getLogger('urllib3').setLevel(logging.WARN)
  cfg = parse_config()
  for version in cfg.versions:
    if parse_version(version) < parse_version('2.3.0'):
      logging.info('version is ' + version + ' but shield supports native realm only from 2.3.0 on. nothing to do.')
      continue
    else:
      logging.info('--> Creating x-pack index for %s' % version)
    logging.info('--> Creating x-pack index for %s' % version)

    # setup for starting nodes
    release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % version)
    if not os.path.exists(release_dir):
      raise RuntimeError('ES version %s does not exist in %s' % (version, cfg.releases_dir))
    tmp_dir = tempfile.mkdtemp()
    data_dir = os.path.join(tmp_dir, 'data')
    logging.info('Temp data dir: %s' % data_dir)
    node = None
    # setup for starting nodes
    release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % version)
    if not os.path.exists(release_dir):
      raise RuntimeError('ES version %s does not exist in %s' % (version, cfg.releases_dir))
    tmp_dir = tempfile.mkdtemp()
    data_dir = os.path.join(tmp_dir, 'data')
    logging.info('Temp data dir: %s' % data_dir)
    node = None

    try:
    try:

      # install plugins
      remove_plugin(version, release_dir, 'license')
      remove_plugin(version, release_dir, 'shield')
      # remove the shield config too before fresh install
      run('rm -rf %s' %(os.path.join(release_dir, 'config/shield')))
      install_plugin(version, release_dir, 'license')
      install_plugin(version, release_dir, 'shield')
      # here we could also install watcher etc
      # install plugins
      remove_plugin(version, release_dir, 'license')
      remove_plugin(version, release_dir, 'shield')
      remove_plugin(version, release_dir, 'watcher')
      # remove the shield config too before fresh install
      run('rm -rf %s' %(os.path.join(release_dir, 'config/shield')))
      install_plugin(version, release_dir, 'license')
      install_plugin(version, release_dir, 'shield')
      install_plugin(version, release_dir, 'watcher')
      # here we could also install watcher etc

      # create admin
      run('%s useradd es_admin -r admin -p 0123456789' %(os.path.join(release_dir, 'bin/shield/esusers')))
      node = start_node(version, release_dir, data_dir)
      # create admin
      run('%s useradd es_admin -r admin -p 0123456789' %(os.path.join(release_dir, 'bin/shield/esusers')))
      node = start_node(version, release_dir, data_dir)

      # create a client that authenticates as es_admin
      client = create_client()
      # create a client that authenticates as es_admin
      client = create_client()
      if parse_version(version) < parse_version('2.3.0'):
        logging.info('Version is ' + version + ' but shield supports native realm only from 2.3.0 on. Nothing to do for Shield.')
      else:
        generate_security_index(client, version)
      # here we could also add watches, monitoring etc
      generate_watcher_index(client, version)
      # here we could also add watches, monitoring etc

      shutdown_node(node)
      node = None
      compress_index(version, tmp_dir, cfg.output_dir)
    finally:

      if node is not None:
        # This only happens if we've hit an exception:
        shutdown_node(node)
      node = None
      compress_index(version, tmp_dir, cfg.output_dir)
    finally:

      if node is not None:
        # This only happens if we've hit an exception:
        shutdown_node(node)
      shutil.rmtree(tmp_dir)
      shutil.rmtree(tmp_dir)

if __name__ == '__main__':
  try:

@@ -51,6 +51,10 @@ dependencies {
  // needed for subethasmtp, has @GuardedBy annotation
  testCompile 'com.google.code.findbugs:jsr305:3.0.1'

  // monitoring deps
  compile "org.elasticsearch.client:rest:${version}"
  compile "org.elasticsearch.client:sniffer:${version}"

  // common test deps
  testCompile 'org.elasticsearch:securemock:1.2'
  testCompile 'org.slf4j:slf4j-log4j12:1.6.2'

@@ -12,7 +12,6 @@ import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;

@@ -63,15 +62,13 @@ public class Monitoring implements ActionPlugin {
    public static final String NAME = "monitoring";

    private final Settings settings;
    private final Environment env;
    private final XPackLicenseState licenseState;
    private final boolean enabled;
    private final boolean transportClientMode;
    private final boolean tribeNode;

    public Monitoring(Settings settings, Environment env, XPackLicenseState licenseState) {
    public Monitoring(Settings settings, XPackLicenseState licenseState) {
        this.settings = settings;
        this.env = env;
        this.licenseState = licenseState;
        this.enabled = XPackSettings.MONITORING_ENABLED.get(settings);
        this.transportClientMode = XPackPlugin.transportClientMode(settings);

@@ -107,10 +104,10 @@ public class Monitoring implements ActionPlugin {
        final MonitoringSettings monitoringSettings = new MonitoringSettings(settings, clusterSettings);
        final CleanerService cleanerService = new CleanerService(settings, clusterSettings, threadPool, licenseState);

        // TODO do exporters and their ssl config really need to be dynamic? https://github.com/elastic/x-plugins/issues/3117
        // TODO: https://github.com/elastic/x-plugins/issues/3117 (remove dynamic need with static exporters)
        final SSLService dynamicSSLService = sslService.createDynamicSSLService();
        Map<String, Exporter.Factory> exporterFactories = new HashMap<>();
        exporterFactories.put(HttpExporter.TYPE, config -> new HttpExporter(config, env, dynamicSSLService));
        exporterFactories.put(HttpExporter.TYPE, config -> new HttpExporter(config, dynamicSSLService));
        exporterFactories.put(LocalExporter.TYPE, config -> new LocalExporter(config, client, clusterService, cleanerService));
        final Exporters exporters = new Exporters(settings, exporterFactories, clusterService);
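Each exporter type above is registered as an Exporter.Factory whose create(config) is invoked once per configured exporter. A hypothetical sketch of wiring in another type, where the "custom" key and the CustomExporter class are illustrative and not part of this commit:

// Sketch only: assumes a CustomExporter with an (Exporter.Config) constructor,
// mirroring how the HTTP and local factories are registered above.
exporterFactories.put("custom", config -> new CustomExporter(config));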
@@ -7,13 +7,17 @@ package org.elasticsearch.xpack.monitoring.collector.cluster;

import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.license.License;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;

import java.util.List;

public class ClusterInfoMonitoringDoc extends MonitoringDoc {

    private String clusterName;
    private String version;
    private License license;
    private List<XPackFeatureSet.Usage> usage;
    private ClusterStatsResponse clusterStats;

    public ClusterInfoMonitoringDoc(String monitoringId, String monitoringVersion) {

@@ -44,6 +48,14 @@ public class ClusterInfoMonitoringDoc extends MonitoringDoc {
        this.license = license;
    }

    public List<XPackFeatureSet.Usage> getUsage() {
        return usage;
    }

    public void setUsage(List<XPackFeatureSet.Usage> usage) {
        this.usage = usage;
    }

    public ClusterStatsResponse getClusterStats() {
        return clusterStats;
    }

@@ -13,10 +13,13 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.action.XPackUsageRequestBuilder;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.collector.AbstractCollector;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;

@@ -60,25 +63,17 @@ public class ClusterStatsCollector extends AbstractCollector {

    @Override
    protected Collection<MonitoringDoc> doCollect() throws Exception {
        List<MonitoringDoc> results = new ArrayList<>(1);
        final Supplier<ClusterStatsResponse> clusterStatsSupplier =
                () -> client.admin().cluster().prepareClusterStats().get(monitoringSettings.clusterStatsTimeout());
        final Supplier<List<XPackFeatureSet.Usage>> usageSupplier = () -> new XPackUsageRequestBuilder(client).get().getUsages();

        // Retrieves cluster stats
        ClusterStatsResponse clusterStats = null;
        try {
            clusterStats = client.admin().cluster().prepareClusterStats().get(monitoringSettings.clusterStatsTimeout());
        } catch (ElasticsearchSecurityException e) {
            if (LicenseUtils.isLicenseExpiredException(e)) {
                logger.trace(
                        (Supplier<?>) () -> new ParameterizedMessage(
                                "collector [{}] - unable to collect data because of expired license", name()), e);
            } else {
                throw e;
            }
        }
        final ClusterStatsResponse clusterStats = clusterStatsSupplier.get();

        long timestamp = System.currentTimeMillis();
        String clusterUUID = clusterUUID();
        DiscoveryNode sourceNode = localNode();
        final long timestamp = System.currentTimeMillis();
        final String clusterUUID = clusterUUID();
        final DiscoveryNode sourceNode = localNode();

        final List<MonitoringDoc> results = new ArrayList<>(1);

        // Adds a cluster info document
        ClusterInfoMonitoringDoc clusterInfoDoc = new ClusterInfoMonitoringDoc(monitoringId(), monitoringVersion());

@@ -89,6 +84,7 @@ public class ClusterStatsCollector extends AbstractCollector {
        clusterInfoDoc.setVersion(Version.CURRENT.toString());
        clusterInfoDoc.setLicense(licenseService.getLicense());
        clusterInfoDoc.setClusterStats(clusterStats);
        clusterInfoDoc.setUsage(collect(usageSupplier));
        results.add(clusterInfoDoc);

        // Adds a cluster stats document

@@ -103,4 +99,21 @@ public class ClusterStatsCollector extends AbstractCollector {

        return Collections.unmodifiableCollection(results);
    }

    @Nullable
    private <T> T collect(final Supplier<T> supplier) {
        try {
            return supplier.get();
        } catch (ElasticsearchSecurityException e) {
            if (LicenseUtils.isLicenseExpiredException(e)) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage(
                        "collector [{}] - unable to collect data because of expired license", name()), e);
            } else {
                throw e;
            }
        }

        return null;
    }

}
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.monitoring.exporter;

import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

/**

@@ -18,11 +19,15 @@ public abstract class ExportBulk {
    private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZING);

    public ExportBulk(String name) {
        this.name = name;
        this.name = Objects.requireNonNull(name);
    }

    @Override
    public String toString() {
    /**
     * Get the name used for any logging messages.
     *
     * @return Never {@code null}.
     */
    public String getName() {
        return name;
    }

@@ -5,11 +5,7 @@
 */
package org.elasticsearch.xpack.monitoring.exporter;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;

@@ -25,7 +21,6 @@ public abstract class Exporter implements AutoCloseable {
    public static final String EXPORT_PIPELINE_NAME = "xpack_monitoring_" + MonitoringTemplateUtils.TEMPLATE_VERSION;

    public static final String INDEX_NAME_TIME_FORMAT_SETTING = "index.name.time_format";
    public static final String BULK_TIMEOUT_SETTING = "bulk.timeout";
    /**
     * Every {@code Exporter} adds the ingest pipeline to bulk requests, but they should, at the exporter level, allow that to be disabled.
     * <p>

@@ -34,16 +29,11 @@ public abstract class Exporter implements AutoCloseable {
    public static final String USE_INGEST_PIPELINE_SETTING = "use_ingest";

    protected final Config config;
    protected final Logger logger;

    @Nullable protected final TimeValue bulkTimeout;

    private AtomicBoolean closed = new AtomicBoolean(false);

    public Exporter(Config config) {
        this.config = config;
        this.logger = config.logger(getClass());
        this.bulkTimeout = config.settings().getAsTime(BULK_TIMEOUT_SETTING, null);
    }

    public String name() {

@@ -82,7 +72,11 @@ public abstract class Exporter implements AutoCloseable {

    protected abstract void doClose();

    protected String settingFQN(String setting) {
    protected static String settingFQN(final Config config) {
        return MonitoringSettings.EXPORTERS_SETTINGS.getKey() + config.name;
    }

    protected static String settingFQN(final Config config, final String setting) {
        return MonitoringSettings.EXPORTERS_SETTINGS.getKey() + config.name + "." + setting;
    }
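Since EXPORTERS_SETTINGS is keyed under xpack.monitoring.exporters. (as the exporter settings used in the test above show), the two helpers produce fully-qualified names like the following sketch, using an exporter named _http and the bulk.timeout setting as examples:

// Example outputs (sketch), for an exporter configured as "_http":
//   settingFQN(config)                 -> "xpack.monitoring.exporters._http"
//   settingFQN(config, "bulk.timeout") -> "xpack.monitoring.exporters._http.bulk.timeout"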

@@ -119,13 +113,11 @@ public abstract class Exporter implements AutoCloseable {
        private final String name;
        private final String type;
        private final boolean enabled;
        private final Settings globalSettings;
        private final Settings settings;

        public Config(String name, String type, Settings globalSettings, Settings settings) {
        public Config(String name, String type, Settings settings) {
            this.name = name;
            this.type = type;
            this.globalSettings = globalSettings;
            this.settings = settings;
            this.enabled = settings.getAsBoolean("enabled", true);
        }

@@ -146,9 +138,6 @@ public abstract class Exporter implements AutoCloseable {
            return settings;
        }

        public Logger logger(Class clazz) {
            return Loggers.getLogger(clazz, globalSettings, name);
        }
    }

    /** A factory for constructing {@link Exporter} instances.*/

|
|||
import org.elasticsearch.common.component.Lifecycle;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsException;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.local.LocalExporter;
|
||||
|
||||
|
@ -117,11 +116,6 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
|
|||
}
|
||||
|
||||
Map<String, Exporter> initExporters(Settings settings) {
|
||||
Settings globalSettings = Settings.builder()
|
||||
.put(settings)
|
||||
.put(Node.NODE_NAME_SETTING.getKey(), nodeName())
|
||||
.build();
|
||||
|
||||
Set<String> singletons = new HashSet<>();
|
||||
Map<String, Exporter> exporters = new HashMap<>();
|
||||
boolean hasDisabled = false;
|
||||
|
@ -135,7 +129,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
|
|||
if (factory == null) {
|
||||
throw new SettingsException("unknown exporter type [" + type + "] set for exporter [" + name + "]");
|
||||
}
|
||||
Exporter.Config config = new Exporter.Config(name, type, globalSettings, exporterSettings);
|
||||
Exporter.Config config = new Exporter.Config(name, type, exporterSettings);
|
||||
if (!config.enabled()) {
|
||||
hasDisabled = true;
|
||||
if (logger.isDebugEnabled()) {
|
||||
|
@ -162,8 +156,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
|
|||
// fallback on the default
|
||||
//
|
||||
if (exporters.isEmpty() && !hasDisabled) {
|
||||
Exporter.Config config = new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE,
|
||||
globalSettings, Settings.EMPTY);
|
||||
Exporter.Config config = new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE, Settings.EMPTY);
|
||||
exporters.put(config.name(), factories.get(LocalExporter.TYPE).create(config));
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,156 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.exporter.ExportBulk;
import org.elasticsearch.xpack.monitoring.exporter.ExportException;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

/**
 * {@code HttpExportBulk} uses the {@link RestClient} to perform a bulk operation against the remote cluster.
 */
class HttpExportBulk extends ExportBulk {

    private static final Logger logger = Loggers.getLogger(HttpExportBulk.class);

    /**
     * The {@link RestClient} managed by the {@link HttpExporter}.
     */
    private final RestClient client;

    /**
     * The querystring parameters to pass along with every bulk request.
     */
    private final Map<String, String> params;

    /**
     * Resolvers are used to render monitoring documents into JSON.
     */
    private final ResolversRegistry registry;

    /**
     * The bytes payload that represents the bulk body is created via {@link #doAdd(Collection)}.
     */
    private byte[] payload = null;

    public HttpExportBulk(final String name, final RestClient client, final Map<String, String> parameters,
                          final ResolversRegistry registry) {
        super(name);

        this.client = client;
        this.params = parameters;
        this.registry = registry;
    }

    @Override
    public void doAdd(Collection<MonitoringDoc> docs) throws ExportException {
        try {
            if (docs != null && docs.isEmpty() == false) {
                try (final BytesStreamOutput payload = new BytesStreamOutput()) {
                    for (MonitoringDoc monitoringDoc : docs) {
                        // any failure caused by an individual doc will be written as an empty byte[], thus not impacting the rest
                        payload.write(toBulkBytes(monitoringDoc));
                    }

                    // store the payload until we flush
                    this.payload = BytesReference.toBytes(payload.bytes());
                }
            }
        } catch (Exception e) {
            throw new ExportException("failed to add documents to export bulk [{}]", e, name);
        }
    }

    @Override
    public void doFlush() throws ExportException {
        if (payload == null) {
            throw new ExportException("unable to send documents because none were loaded for export bulk [{}]", name);
        } else if (payload.length != 0) {
            final HttpEntity body = new ByteArrayEntity(payload, ContentType.APPLICATION_JSON);

            client.performRequestAsync("POST", "/_bulk", params, body, HttpExportBulkResponseListener.INSTANCE);

            // free the memory
            payload = null;
        }
    }

    @Override
    protected void doClose() {
        // nothing serious to do at this stage
        assert payload == null;
    }

    private byte[] toBulkBytes(final MonitoringDoc doc) throws IOException {
        final XContentType xContentType = XContentType.JSON;
        final XContent xContent = xContentType.xContent();

        try (final BytesStreamOutput out = new BytesStreamOutput()) {
            MonitoringIndexNameResolver<MonitoringDoc> resolver = registry.getResolver(doc);

            if (resolver != null) {
                String index = resolver.index(doc);
                String type = resolver.type(doc);
                String id = resolver.id(doc);

                try (XContentBuilder builder = new XContentBuilder(xContent, out)) {
                    // Builds the bulk action metadata line
                    builder.startObject();
                    builder.startObject("index");
                    builder.field("_index", index);
                    builder.field("_type", type);
                    if (id != null) {
                        builder.field("_id", id);
                    }
                    builder.endObject();
                    builder.endObject();
                }

                // Adds action metadata line bulk separator
                out.write(xContent.streamSeparator());

                // Render the monitoring document
                BytesRef bytesRef = resolver.source(doc, xContentType).toBytesRef();
                out.write(bytesRef.bytes, bytesRef.offset, bytesRef.length);

                // Adds final bulk separator
                out.write(xContent.streamSeparator());

                logger.trace("added index request [index={}, type={}, id={}]", index, type, id);
            } else {
                logger.error("no resolver found for monitoring document [class={}, id={}, version={}]",
                        doc.getClass().getName(), doc.getMonitoringId(), doc.getMonitoringVersion());
            }

            return BytesReference.toBytes(out.bytes());
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to render document [{}], skipping it [{}]", doc, name), e);

            return BytesRef.EMPTY_BYTES;
        }
    }

}
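toBulkBytes above emits the standard two-line bulk format: an action-metadata line followed by the document source, each terminated by a newline. A self-contained sketch of that payload shape, where the index, type, and id values are hypothetical stand-ins for what the resolver would produce:

public class BulkPayloadExample {
    public static void main(String[] args) {
        // Hypothetical metadata values; the real ones come from the resolver.
        String metadata = "{\"index\":{\"_index\":\".monitoring-es-2017.01.01\","
                + "\"_type\":\"cluster_info\",\"_id\":\"abc123\"}}";
        String source = "{\"cluster_uuid\":\"...\",\"timestamp\":\"...\"}";

        // Each line, including the last, ends with the stream separator '\n'.
        String bulkBody = metadata + "\n" + source + "\n";
        System.out.print(bulkBody);
    }
}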
@@ -0,0 +1,122 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.Objects;

/**
 * {@code HttpExportBulkResponseListener} logs issues based on the response, but otherwise does nothing else.
 */
class HttpExportBulkResponseListener implements ResponseListener {

    private static final Logger logger = Loggers.getLogger(HttpExportBulkResponseListener.class);

    /**
     * Singleton instance.
     */
    public static final HttpExportBulkResponseListener INSTANCE = new HttpExportBulkResponseListener(XContentType.JSON.xContent());

    /**
     * The response content type.
     */
    private final XContent xContent;

    /**
     * Create a new {@link HttpExportBulkResponseListener}.
     *
     * @param xContent The {@code XContent} to use for parsing the response.
     */
    HttpExportBulkResponseListener(final XContent xContent) {
        this.xContent = Objects.requireNonNull(xContent);
    }

    /**
     * Success is relative with bulk responses because unless it's rejected outright, it returns with a 200.
     * <p>
     * Individual documents can fail, and since we know how we're making them, a failed document indicates a problem
     * on the receiving side that is worth logging.
     */
    @Override
    public void onSuccess(final Response response) {
        try (final XContentParser parser = xContent.createParser(response.getEntity().getContent())) {
            // avoid parsing the entire payload if we don't need to
            XContentParser.Token token = parser.nextToken();

            if (token == XContentParser.Token.START_OBJECT) {
                String currentFieldName = null;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if ("errors".equals(currentFieldName)) {
                            // no errors? then we can stop looking
                            if (parser.booleanValue() == false) {
                                return;
                            }
                        }
                    } else if (token == XContentParser.Token.START_ARRAY) {
                        // note: this assumes that "items" is the only array portion of the response (currently true)
                        parseErrors(parser);
                        return;
                    }
                }
            }
        } catch (IOException | RuntimeException e) {
            onError("unexpected exception while verifying bulk response", e);
        }
    }

    /**
     * Logs every <code>error</code> field's value until it hits the end of an array.
     *
     * @param parser The bulk response parser
     * @throws IOException if any parsing error occurs
     */
    private void parseErrors(final XContentParser parser) throws IOException {
        XContentParser.Token token;
        String currentFieldName = null;

        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue()) {
                if ("error".equals(currentFieldName)) {
                    onItemError(parser.text());
                }
            }
        }
    }

    /**
     * Log obvious failures.
     * <p>
     * In the future, we should queue replayable failures.
     */
    @Override
    public void onFailure(final Exception exception) {
        // queueable exceptions:
        // - RestStatus.TOO_MANY_REQUESTS.getStatus()
        // - possibly other, non-ResponseExceptions
        onError("bulk request failed unexpectedly", exception);
    }

    void onError(final String msg, final Throwable cause) {
        logger.warn(msg, cause);
    }

    void onItemError(final String text) {
        logger.warn("unexpected error while indexing monitoring document: [{}]", text);
    }

}
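To make the parsing above concrete: a bulk response normally comes back as HTTP 200 with a top-level "errors" flag, and per-item failures appear as "error" fields inside the "items" array. A sketch of the shape the listener walks, with illustrative field values:

public class BulkResponseShapeExample {
    public static void main(String[] args) {
        // Illustrative response body; onSuccess(...) stops early when "errors" is false,
        // otherwise it walks "items" and logs each "error" value it finds.
        String response = "{\"took\": 5, \"errors\": true, \"items\": ["
                + "{\"index\": {\"_index\": \".monitoring-es-1\", \"status\": 429, \"error\": \"rejected execution ...\"}},"
                + "{\"index\": {\"_index\": \".monitoring-es-1\", \"status\": 201}}"
                + "]}";
        System.out.println(response);
    }
}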
(File diff suppressed because it is too large.)
@@ -1,38 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;

public class HttpExporterUtils {

    public static URL parseHostWithPath(String host, String path) throws URISyntaxException, MalformedURLException {

        if (!host.contains("://")) {
            // prefix with http
            host = "http://" + host;
        }
        if (!host.endsWith("/")) {
            // make sure we can safely resolves sub paths and not replace parent folders
            host = host + "/";
        }

        URL hostUrl = new URL(host);

        if (hostUrl.getPort() == -1) {
            // url has no port, default to 9200 - sadly we need to rebuild..
            StringBuilder newUrl = new StringBuilder(hostUrl.getProtocol() + "://");
            newUrl.append(hostUrl.getHost()).append(":9200").append(hostUrl.toURI().getPath());
            hostUrl = new URL(newUrl.toString());

        }
        return new URL(hostUrl, path);

    }

}
@@ -0,0 +1,227 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Objects;

/**
 * {@code HttpHostBuilder} creates an {@link HttpHost} meant to be used with an Elasticsearch cluster. The {@code HttpHostBuilder} uses
 * defaults that are most common for Elasticsearch, including an unspecified port defaulting to <code>9200</code> and the default scheme
 * being <code>http</code> (as opposed to <code>https</code>).
 * <p>
 * The only <em>required</em> detail is the host to connect to, either via hostname or IP address.
 * <p>
 * This enables you to create an {@code HttpHost} directly via a builder mechanism, or indirectly by parsing a URI-like string. For example:
 * <pre><code>
 * HttpHost host1 = HttpHostBuilder.builder("localhost").build();              // http://localhost:9200
 * HttpHost host2 = HttpHostBuilder.builder("localhost:9200").build();         // http://localhost:9200
 * HttpHost host4 = HttpHostBuilder.builder("http://localhost:9200").build();  // http://localhost:9200
 * HttpHost host5 = HttpHostBuilder.builder("https://localhost:9200").build(); // https://localhost:9200
 * HttpHost host6 = HttpHostBuilder.builder("https://127.0.0.1:9200").build(); // https://127.0.0.1:9200 (IPv4 localhost)
 * HttpHost host7 = HttpHostBuilder.builder("http://10.1.2.3").build();        // http://10.1.2.3:9200
 * HttpHost host8 = HttpHostBuilder.builder("https://[::1]").build();          // https://[::1]:9200 (IPv6 localhost)
 * HttpHost host9 = HttpHostBuilder.builder("https://[::1]:9200").build();     // https://[::1]:9200 (IPv6 localhost)
 * HttpHost host10= HttpHostBuilder.builder("https://sub.domain").build();     // https://sub.domain:9200
 * </code></pre>
 * Note: {@code HttpHost}s are the mechanism that the {@link RestClient} uses to build the base request. If you need to specify proxy
 * settings, then use the {@link RestClientBuilder.RequestConfigCallback} to configure the {@code Proxy} settings.
 *
 * @see #builder(String)
 * @see #builder()
 */
public class HttpHostBuilder {

    /**
     * The scheme used to connect to Elasticsearch.
     */
    private Scheme scheme = Scheme.HTTP;
    /**
     * The host is the only required portion of the supplied URI when building it. The rest can be defaulted.
     */
    private String host = null;
    /**
     * The port used to connect to Elasticsearch.
     * <p>
     * The default port is 9200 when unset.
     */
    private int port = -1;

    /**
     * Create an empty {@link HttpHostBuilder}.
     * <p>
     * The expectation is that you then explicitly build the {@link HttpHost} piece-by-piece.
     * <p>
     * For example:
     * <pre><code>
     * HttpHost localhost = HttpHostBuilder.builder().host("localhost").build();          // http://localhost:9200
     * HttpHost explicitLocalhost = HttpHostBuilder.builder().scheme(Scheme.HTTP).host("localhost").port(9200).build();
     *                                                                                    // http://localhost:9200
     * HttpHost secureLocalhost = HttpHostBuilder.builder().scheme(Scheme.HTTPS).host("localhost").build(); // https://localhost:9200
     * HttpHost differentPort = HttpHostBuilder.builder().host("my_host").port(19200).build();    // http://my_host:19200
     * HttpHost ipBased = HttpHostBuilder.builder().host("192.168.0.11").port(80).build();        // http://192.168.0.11:80
     * </code></pre>
     *
     * @return Never {@code null}.
     */
    public static HttpHostBuilder builder() {
        return new HttpHostBuilder();
    }

    /**
     * Create an empty {@link HttpHostBuilder}.
     * <p>
     * The expectation is that you then explicitly build the {@link HttpHost} piece-by-piece.
     * <p>
     * For example:
     * <pre><code>
     * HttpHost localhost = HttpHostBuilder.builder("localhost").build();                     // http://localhost:9200
     * HttpHost explicitLocalhost = HttpHostBuilder.builder("http://localhost:9200").build(); // http://localhost:9200
     * HttpHost secureLocalhost = HttpHostBuilder.builder("https://localhost").build();       // https://localhost:9200
     * HttpHost differentPort = HttpHostBuilder.builder("my_host:19200").build();             // http://my_host:19200
     * HttpHost ipBased = HttpHostBuilder.builder("192.168.0.11:80").build();                 // http://192.168.0.11:80
     * </code></pre>
     *
     * @return Never {@code null}.
     * @throws NullPointerException if {@code uri} is {@code null}.
     * @throws IllegalArgumentException if any issue occurs while parsing the {@code uri}.
     */
    public static HttpHostBuilder builder(final String uri) {
        return new HttpHostBuilder(uri);
    }

    /**
     * Create a new {@link HttpHost} from scratch.
     */
    HttpHostBuilder() {
        // everything is in the default state
    }

    /**
     * Create a new {@link HttpHost} based on the supplied host.
     *
     * @param uri The [partial] URI used to build.
     * @throws NullPointerException if {@code uri} is {@code null}.
     * @throws IllegalArgumentException if any issue occurs while parsing the {@code uri}.
     */
    HttpHostBuilder(final String uri) {
        Objects.requireNonNull(uri, "uri must not be null");

        try {
            String cleanedUri = uri;

            if (uri.contains("://") == false) {
                cleanedUri = "http://" + uri;
            }

            final URI parsedUri = new URI(cleanedUri);

            // "localhost:9200" doesn't have a scheme
            if (parsedUri.getScheme() != null) {
                scheme(Scheme.fromString(parsedUri.getScheme()));
            }

            if (parsedUri.getHost() != null) {
                host(parsedUri.getHost());
            } else {
                // if the host is null, then it means one of two things: we're in a broken state _or_ it had something like underscores
                // we want the raw form so that parts of the URI are not decoded
                final String host = parsedUri.getRawAuthority();

                // they explicitly provided the port, which is unparsed when the host is null
                if (host.contains(":")) {
                    final String[] hostPort = host.split(":", 2);

                    host(hostPort[0]);
                    port(Integer.parseInt(hostPort[1]));
                } else {
                    host(host);
                }
            }

            if (parsedUri.getPort() != -1) {
                port(parsedUri.getPort());
            }

            // fail for proxies
            if (parsedUri.getRawPath() != null && parsedUri.getRawPath().isEmpty() == false) {
                throw new IllegalArgumentException(
                        "HttpHosts do not use paths [" + parsedUri.getRawPath() +
                        "]. see setRequestConfigCallback for proxies. value: [" + uri + "]");
            }
        } catch (URISyntaxException | IndexOutOfBoundsException | NullPointerException e) {
            throw new IllegalArgumentException("error parsing host: [" + uri + "]", e);
        }
    }

    /**
     * Set the scheme (aka protocol) for the {@link HttpHost}.
     *
     * @param scheme The scheme to use.
     * @return Always {@code this}.
     * @throws NullPointerException if {@code scheme} is {@code null}.
     */
    public HttpHostBuilder scheme(final Scheme scheme) {
        this.scheme = Objects.requireNonNull(scheme);

        return this;
    }

    /**
     * Set the host for the {@link HttpHost}.
     * <p>
     * This does not attempt to parse the {@code host} in any way.
     *
     * @param host The host to use.
     * @return Always {@code this}.
     * @throws NullPointerException if {@code host} is {@code null}.
     */
    public HttpHostBuilder host(final String host) {
        this.host = Objects.requireNonNull(host);

        return this;
    }

    /**
     * Set the port for the {@link HttpHost}.
     * <p>
     * Specifying the {@code port} as -1 will cause it to be defaulted to 9200 when the {@code HttpHost} is built.
     *
     * @param port The port to use.
     * @return Always {@code this}.
     * @throws IllegalArgumentException if the {@code port} is not -1 or [1, 65535].
     */
    public HttpHostBuilder port(final int port) {
        // setting a port to 0 makes no sense when you're the client; -1 allows us to use the default when we build
        if (port != -1 && (port < 1 || port > 65535)) {
            throw new IllegalArgumentException("port must be -1 for the default or [1, 65535]. was: " + port);
        }

        this.port = port;

        return this;
    }

    /**
     * Create a new {@link HttpHost} from the current {@code scheme}, {@code host}, and {@code port}.
     *
     * @return Never {@code null}.
     * @throws IllegalStateException if {@code host} is unset.
     */
    public HttpHost build() {
        if (host == null) {
            throw new IllegalStateException("host must be set");
        }

        return new HttpHost(host, port == -1 ? 9200 : port, scheme.toString());
    }

}
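Since the builder feeds directly into the low-level REST client, typical usage looks like this sketch, where the endpoint URL and class name are illustrative:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class HttpHostBuilderUsageExample {
    public static void main(String[] args) throws Exception {
        // The endpoint is illustrative; an unspecified port would default to 9200.
        HttpHost host = HttpHostBuilder.builder("https://monitoring.example.com:9200").build();
        try (RestClient client = RestClient.builder(host).build()) {
            // use the client ...
        }
    }
}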
@@ -0,0 +1,172 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.client.RestClient;

import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

/**
 * An {@code HttpResource} is some "thing" that needs to exist on the other side. If it does not exist, then follow-on actions cannot
 * occur.
 * <p>
 * {@code HttpResource}s can assume that, as long as the connection stays active, then a verified resource should continue to exist on the
 * other side.
 *
 * @see MultiHttpResource
 * @see PublishableHttpResource
 */
public abstract class HttpResource {

    /**
     * The current state of the {@link HttpResource}.
     */
    enum State {

        /**
         * The resource is ready to use.
         */
        CLEAN,
        /**
         * The resource is being checked right now to see if it can be used.
         */
        CHECKING,
        /**
         * The resource needs to be checked before it can be used.
         */
        DIRTY
    }

    /**
     * The user-recognizable name for whatever owns this {@link HttpResource}.
     */
    protected final String resourceOwnerName;
    /**
     * The current state of the resource, which helps to determine if it needs to be checked.
     */
    protected final AtomicReference<State> state;

    /**
     * Create a new {@link HttpResource} that {@linkplain #isDirty() is dirty}.
     *
     * @param resourceOwnerName The user-recognizable name
     */
    protected HttpResource(final String resourceOwnerName) {
        this(resourceOwnerName, true);
    }

    /**
     * Create a new {@link HttpResource} that is {@code dirty}.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param dirty Whether the resource is dirty or not
     */
    protected HttpResource(final String resourceOwnerName, final boolean dirty) {
        this.resourceOwnerName = Objects.requireNonNull(resourceOwnerName);
        this.state = new AtomicReference<>(dirty ? State.DIRTY : State.CLEAN);
    }

    /**
     * Get the resource owner for this {@link HttpResource}.
     *
     * @return Never {@code null}.
     */
    public String getResourceOwnerName() {
        return resourceOwnerName;
    }

    /**
     * Determine if the resource needs to be checked.
     *
     * @return {@code true} to indicate that the resource should block follow-on actions that require it.
     * @see #checkAndPublish(RestClient)
     */
    public boolean isDirty() {
        return state.get() != State.CLEAN;
    }

    /**
     * Mark the resource as {@linkplain #isDirty() dirty}.
     */
    public final void markDirty() {
        state.compareAndSet(State.CLEAN, State.DIRTY);
    }

    /**
     * If the resource is currently {@linkplain #isDirty() dirty}, then check and, if necessary, publish this {@link HttpResource}.
     * <p>
     * Expected usage:
     * <pre><code>
     * if (resource.checkAndPublishIfDirty(client)) {
     *     // use client with resources having been verified
     * }
     * </code></pre>
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if the resource is available for use. {@code false} to stop.
     */
    public final boolean checkAndPublishIfDirty(final RestClient client) {
        final State state = this.state.get();

        // get in line and wait until the check passes or fails if it's checking now, or start checking
        return state == State.CLEAN || blockUntilCheckAndPublish(client);
    }

    /**
     * Invoked by {@link #checkAndPublishIfDirty(RestClient)} to block in case {@link #checkAndPublish(RestClient)} is in the middle of
     * {@linkplain State#CHECKING checking}.
     * <p>
     * Unlike {@link #isDirty()} and {@link #checkAndPublishIfDirty(RestClient)}, this is {@code synchronized} in order to prevent
     * double-execution and it invokes {@link #checkAndPublish(RestClient)} if it's {@linkplain State#DIRTY dirty}.
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if the resource is available for use. {@code false} to stop.
     */
    private synchronized boolean blockUntilCheckAndPublish(final RestClient client) {
        final State state = this.state.get();

        return state == State.CLEAN || (state == State.DIRTY && checkAndPublish(client));
    }

    /**
     * Check and, if necessary, publish this {@link HttpResource}.
     * <p>
     * This will perform the check regardless of the {@linkplain #isDirty() dirtiness} and it will update the dirtiness.
     * Using this directly can be useful if there is ever a need to double-check dirtiness without having to {@linkplain #markDirty() mark}
     * it as dirty.
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if the resource is available for use. {@code false} to stop.
     * @see #isDirty()
     */
    public final synchronized boolean checkAndPublish(final RestClient client) {
        // we always check when asked, regardless of clean or dirty
        state.set(State.CHECKING);

        boolean success = false;

        try {
            success = doCheckAndPublish(client);
        } finally {
            // nothing else should be unsetting from CHECKING
            assert state.get() == State.CHECKING;

            state.set(success ? State.CLEAN : State.DIRTY);
        }

        return success;
    }

    /**
     * Perform whatever is necessary to check and publish this {@link HttpResource}.
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if the resource is available for use. {@code false} to stop.
     */
    protected abstract boolean doCheckAndPublish(final RestClient client);

}
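A concrete subclass only has to implement doCheckAndPublish. A minimal hypothetical sketch that treats an HTTP HEAD on a fixed endpoint as the existence check, where the endpoint and class name are illustrative and not part of this commit:

import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

/**
 * Hypothetical example resource: "exists" when HEAD /_template/my_template returns 200.
 */
class ExampleTemplateResource extends HttpResource {

    ExampleTemplateResource(final String resourceOwnerName) {
        super(resourceOwnerName); // starts dirty, so the first use triggers a check
    }

    @Override
    protected boolean doCheckAndPublish(final RestClient client) {
        try {
            final Response response =
                    client.performRequest("HEAD", "/_template/my_template", Collections.emptyMap());
            return response.getStatusLine().getStatusCode() == 200;
        } catch (Exception e) {
            // a failed check leaves the resource dirty, so it is retried next time
            return false;
        }
    }
}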
@@ -0,0 +1,75 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.logging.Loggers;

import java.util.Collections;
import java.util.List;

/**
 * {@code MultiHttpResource} serves as a wrapper of a {@link List} of {@link HttpResource}s.
 * <p>
 * By telling the {@code MultiHttpResource} to become dirty, it effectively marks all of its sub-resources dirty as well.
 * <p>
 * Sub-resources should be the sole responsibility of the {@code MultiHttpResource}; there should not be something using them directly
 * if they are included in a {@code MultiHttpResource}.
 */
public class MultiHttpResource extends HttpResource {

    private static final Logger logger = Loggers.getLogger(MultiHttpResource.class);

    /**
     * Sub-resources that are grouped to simplify notification.
     */
    private final List<HttpResource> resources;

    /**
     * Create a {@link MultiHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param resources The sub-resources to aggregate.
     */
    public MultiHttpResource(final String resourceOwnerName, final List<? extends HttpResource> resources) {
        super(resourceOwnerName);

        this.resources = Collections.unmodifiableList(resources);
    }

    /**
     * Get the resources that are checked by this {@link MultiHttpResource}.
     *
     * @return Never {@code null}.
     */
    public List<HttpResource> getResources() {
        return resources;
    }

    /**
     * Check and publish all {@linkplain #resources sub-resources}.
     */
    @Override
    protected boolean doCheckAndPublish(RestClient client) {
        logger.trace("checking sub-resources existence and publishing on the [{}]", resourceOwnerName);

        boolean exists = true;

        // short-circuits on the first failure, thus marking the whole thing dirty
        for (final HttpResource resource : resources) {
            if (resource.checkAndPublish(client) == false) {
                exists = false;
                break;
            }
        }

        logger.trace("all sub-resources exist [{}] on the [{}]", exists, resourceOwnerName);

        return exists;
    }

}
|
|
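// Illustrative composition, not part of this commit: grouping this change's resources behind
// a single dirty-check. The owner name and the template/pipeline bodies are hypothetical.
package org.elasticsearch.xpack.monitoring.exporter.http;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class MultiHttpResourceSketch {

    static MultiHttpResource all() {
        return new MultiHttpResource("my-exporter", Arrays.asList(
                new TemplateHttpResource("my-exporter", null, ".monitoring-es",
                                         () -> "{\"template\":\".monitoring-es-*\"}"),
                new PipelineHttpResource("my-exporter", null, "xpack_monitoring",
                                         () -> "{\"processors\":[]}".getBytes(StandardCharsets.UTF_8))));
    }

}
// A failed sub-resource short-circuits doCheckAndPublish, so the whole group stays dirty and
// is retried in full on the next exporter attempt.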
@@ -0,0 +1,93 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;

/**
 * {@code NodeFailureListener} logs warnings for any node failure, but it can also notify a {@link Sniffer} and/or {@link HttpResource}
 * upon failures as well.
 * <p>
 * The {@linkplain #setSniffer(Sniffer) sniffer} and {@linkplain #setResource(HttpResource) resource} are expected to be set immediately
 * or not at all.
 */
class NodeFailureListener extends RestClient.FailureListener {

    private static final Logger logger = Loggers.getLogger(NodeFailureListener.class);

    /**
     * The optional {@link Sniffer} associated with the {@link RestClient}.
     */
    @Nullable
    private SetOnce<Sniffer> sniffer = new SetOnce<>();
    /**
     * The optional {@link HttpResource} associated with the {@link RestClient}.
     */
    @Nullable
    private SetOnce<HttpResource> resource = new SetOnce<>();

    /**
     * Get the {@link Sniffer} that is notified upon node failure.
     *
     * @return Can be {@code null}.
     */
    @Nullable
    public Sniffer getSniffer() {
        return sniffer.get();
    }

    /**
     * Set the {@link Sniffer} that is notified upon node failure.
     *
     * @param sniffer The sniffer to notify
     * @throws SetOnce.AlreadySetException if called more than once
     */
    public void setSniffer(@Nullable final Sniffer sniffer) {
        this.sniffer.set(sniffer);
    }

    /**
     * Get the {@link HttpResource} that is notified upon node failure.
     *
     * @return Can be {@code null}.
     */
    @Nullable
    public HttpResource getResource() {
        return resource.get();
    }

    /**
     * Set the {@link HttpResource} that is notified upon node failure.
     *
     * @param resource The resource to notify
     * @throws SetOnce.AlreadySetException if called more than once
     */
    public void setResource(@Nullable final HttpResource resource) {
        this.resource.set(resource);
    }

    @Override
    public void onFailure(final HttpHost host) {
        logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort());

        final HttpResource resource = this.resource.get();
        final Sniffer sniffer = this.sniffer.get();

        if (resource != null) {
            resource.markDirty();
        }
        if (sniffer != null) {
            sniffer.sniffOnFailure(host);
        }
    }

}
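// Illustrative wiring, not part of this commit: the listener must be handed to the builder
// before the client exists, while the Sniffer needs the built client, hence the set-once
// setters. The host and port are hypothetical.
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;

class NodeFailureListenerSketch {

    static RestClient build(final HttpResource resource) {
        final NodeFailureListener listener = new NodeFailureListener();
        final RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                                            .setFailureListener(listener)
                                            .build();

        listener.setSniffer(Sniffer.builder(client).build()); // a second set(...) would throw
        listener.setResource(resource);                       // any failure re-dirties everything

        return client;
    }

}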
@@ -0,0 +1,84 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Objects;
import java.util.function.Supplier;

/**
 * {@code PipelineHttpResource}s allow the checking and uploading of ingest pipelines to a remote cluster.
 * <p>
 * In the future, we will need to also support the transformation or replacement of pipelines based on their version, but we do not need
 * that functionality until some breaking change in the Monitoring API requires it.
 */
public class PipelineHttpResource extends PublishableHttpResource {

    private static final Logger logger = Loggers.getLogger(PipelineHttpResource.class);

    /**
     * The name of the pipeline that is sent to the remote cluster.
     */
    private final String pipelineName;
    /**
     * Provides a fully formed pipeline (e.g., no variables that need to be replaced).
     */
    private final Supplier<byte[]> pipeline;

    /**
     * Create a new {@link PipelineHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param masterTimeout Master timeout to use with any request.
     * @param pipelineName The name of the pipeline (e.g., ".pipeline123").
     * @param pipeline The pipeline provider.
     */
    public PipelineHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
                                final String pipelineName, final Supplier<byte[]> pipeline) {
        super(resourceOwnerName, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);

        this.pipelineName = Objects.requireNonNull(pipelineName);
        this.pipeline = Objects.requireNonNull(pipeline);
    }

    /**
     * Determine if the current {@linkplain #pipelineName pipeline} exists.
     */
    @Override
    protected CheckResponse doCheck(final RestClient client) {
        return checkForResource(client, logger,
                                "/_ingest/pipeline", pipelineName, "monitoring pipeline",
                                resourceOwnerName, "monitoring cluster");
    }

    /**
     * Publish the current {@linkplain #pipelineName pipeline}.
     */
    @Override
    protected boolean doPublish(final RestClient client) {
        return putResource(client, logger,
                           "/_ingest/pipeline", pipelineName, this::pipelineToHttpEntity, "monitoring pipeline",
                           resourceOwnerName, "monitoring cluster");
    }

    /**
     * Create a {@link HttpEntity} for the {@link #pipeline}.
     *
     * @return Never {@code null}.
     */
    HttpEntity pipelineToHttpEntity() {
        return new ByteArrayEntity(pipeline.get(), ContentType.APPLICATION_JSON);
    }

}
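// Illustrative, not part of this commit: the Supplier defers rendering the pipeline body as
// UTF-8 bytes until a publish is actually required. The no-op pipeline definition and names
// are hypothetical.
package org.elasticsearch.xpack.monitoring.exporter.http;

import java.nio.charset.StandardCharsets;

class PipelineResourceSketch {

    static PipelineHttpResource noOpPipeline() {
        return new PipelineHttpResource("my-exporter", null, "xpack_monitoring",
                () -> "{\"description\":\"no-op\",\"processors\":[]}".getBytes(StandardCharsets.UTF_8));
    }

}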
@@ -0,0 +1,257 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 * {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked <em>and</em>
 * published in the event that the check does not pass.
 *
 * @see #doCheck(RestClient)
 * @see #doPublish(RestClient)
 */
public abstract class PublishableHttpResource extends HttpResource {

    /**
     * {@code CheckResponse} provides a ternary state for {@link #doCheck(RestClient)}.
     */
    public enum CheckResponse {

        /**
         * The check found the resource, so nothing needs to be published.
         */
        EXISTS,
        /**
         * The check did not find the resource, so we need to attempt to publish it.
         */
        DOES_NOT_EXIST,
        /**
         * The check hit an unexpected exception that should block publishing attempts until it can check again.
         */
        ERROR

    }

    /**
     * A value that will never match anything in the JSON response body, thus limiting it to "{}".
     */
    public static final String FILTER_PATH_NONE = "$NONE";

    /**
     * Use this to avoid getting any JSON response from a request.
     */
    public static final Map<String, String> NO_BODY_PARAMETERS = Collections.singletonMap("filter_path", FILTER_PATH_NONE);

    /**
     * The default parameters to use for any request.
     */
    protected final Map<String, String> parameters;

    /**
     * Create a new {@link PublishableHttpResource} that {@linkplain #isDirty() is dirty}.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param masterTimeout Master timeout to use with any request.
     * @param baseParameters The base parameters to specify for the request.
     */
    protected PublishableHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
                                      final Map<String, String> baseParameters) {
        this(resourceOwnerName, masterTimeout, baseParameters, true);
    }

    /**
     * Create a new {@link PublishableHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param masterTimeout Master timeout to use with any request.
     * @param baseParameters The base parameters to specify for the request.
     * @param dirty Whether the resource is dirty or not
     */
    protected PublishableHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
                                      final Map<String, String> baseParameters, final boolean dirty) {
        super(resourceOwnerName, dirty);

        if (masterTimeout != null) {
            final Map<String, String> parameters = new HashMap<>(baseParameters.size() + 1);

            parameters.putAll(baseParameters);
            parameters.put("master_timeout", masterTimeout.toString());

            this.parameters = Collections.unmodifiableMap(parameters);
        } else {
            this.parameters = baseParameters;
        }
    }

    /**
     * Get the default parameters to use with every request.
     *
     * @return Never {@code null}.
     */
    public Map<String, String> getParameters() {
        return parameters;
    }

    /**
     * Perform whatever is necessary to check and publish this {@link PublishableHttpResource}.
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if the resource is available for use. {@code false} to stop.
     */
    @Override
    protected final boolean doCheckAndPublish(final RestClient client) {
        final CheckResponse check = doCheck(client);

        // errors cause a dead-stop
        return check != CheckResponse.ERROR && (check == CheckResponse.EXISTS || doPublish(client));
    }

    /**
     * Determine if the current resource exists.
     * <ul>
     * <li>
     * {@link CheckResponse#EXISTS EXISTS} will <em>not</em> run {@link #doPublish(RestClient)} and will mark this as <em>not</em> dirty.
     * </li>
     * <li>
     * {@link CheckResponse#DOES_NOT_EXIST DOES_NOT_EXIST} will run {@link #doPublish(RestClient)}, which determines the dirtiness.
     * </li>
     * <li>{@link CheckResponse#ERROR ERROR} will <em>not</em> run {@link #doPublish(RestClient)} and will mark this as dirty.</li>
     * </ul>
     *
     * @param client The REST client to make the request(s).
     * @return Never {@code null}.
     */
    protected abstract CheckResponse doCheck(final RestClient client);

    /**
     * Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint.
     * <p>
     * This provides the base-level check for any resource that does not need to inspect its actual contents.
     *
     * @param client The REST client to make the request(s).
     * @param logger The logger to use for status messages.
     * @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
     * @param resourceName The name of the resource (e.g., "template123").
     * @param resourceType The type of resource (e.g., "monitoring template").
     * @param resourceOwnerName The user-recognizable resource owner.
     * @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
     * @return Never {@code null}.
     */
    protected CheckResponse checkForResource(final RestClient client, final Logger logger,
                                             final String resourceBasePath,
                                             final String resourceName, final String resourceType,
                                             final String resourceOwnerName, final String resourceOwnerType) {
        logger.trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);

        try {
            final Response response = client.performRequest("GET", resourceBasePath + "/" + resourceName, parameters);

            // we don't currently check for the content because we always expect it to be the same;
            // if we ever make a BWC change to any template (thus without renaming it), then we need to check the content!
            if (response.getStatusLine().getStatusCode() == RestStatus.OK.getStatus()) {
                logger.debug("{} [{}] found on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);

                return CheckResponse.EXISTS;
            } else {
                throw new ResponseException(response);
            }
        } catch (final ResponseException e) {
            final int statusCode = e.getResponse().getStatusLine().getStatusCode();

            // 404
            if (statusCode == RestStatus.NOT_FOUND.getStatus()) {
                logger.debug("{} [{}] does not exist on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);

                return CheckResponse.DOES_NOT_EXIST;
            } else {
                logger.error((Supplier<?>) () ->
                        new ParameterizedMessage("failed to verify {} [{}] on the [{}] {} with status code [{}]",
                                                 resourceType, resourceName, resourceOwnerName, resourceOwnerType, statusCode),
                        e);

                // weirder failure than below; block responses just like other unexpected failures
                return CheckResponse.ERROR;
            }
        } catch (IOException | RuntimeException e) {
            logger.error((Supplier<?>) () ->
                    new ParameterizedMessage("failed to verify {} [{}] on the [{}] {}",
                                             resourceType, resourceName, resourceOwnerName, resourceOwnerType),
                    e);

            // do not attempt to publish the resource because we're in a broken state
            return CheckResponse.ERROR;
        }
    }

    /**
     * Publish the current resource.
     * <p>
     * This is only invoked if {@linkplain #doCheck(RestClient) the check} fails.
     *
     * @param client The REST client to make the request(s).
     * @return {@code true} if it exists.
     */
    protected abstract boolean doPublish(final RestClient client);

    /**
     * Upload the {@code resourceName} to the {@code resourceBasePath} endpoint.
     *
     * @param client The REST client to make the request(s).
     * @param logger The logger to use for status messages.
     * @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
     * @param resourceName The name of the resource (e.g., "template123").
     * @param body The {@link HttpEntity} that makes up the body of the request.
     * @param resourceType The type of resource (e.g., "monitoring template").
     * @param resourceOwnerName The user-recognizable resource owner.
     * @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
     * @return {@code true} if the resource was successfully uploaded.
     */
    protected boolean putResource(final RestClient client, final Logger logger,
                                  final String resourceBasePath,
                                  final String resourceName, final java.util.function.Supplier<HttpEntity> body,
                                  final String resourceType,
                                  final String resourceOwnerName, final String resourceOwnerType) {
        logger.trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);

        boolean success = false;

        try {
            final Response response = client.performRequest("PUT", resourceBasePath + "/" + resourceName, parameters, body.get());
            final int statusCode = response.getStatusLine().getStatusCode();

            // 200 or 201
            if (statusCode == RestStatus.OK.getStatus() || statusCode == RestStatus.CREATED.getStatus()) {
                logger.debug("{} [{}] uploaded to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);

                success = true;
            } else {
                throw new RuntimeException("[" + resourceBasePath + "/" + resourceName + "] responded with [" + statusCode + "]");
            }
        } catch (IOException | RuntimeException e) {
            logger.error((Supplier<?>) () ->
                    new ParameterizedMessage("failed to upload {} [{}] on the [{}] {}",
                                             resourceType, resourceName, resourceOwnerName, resourceOwnerType),
                    e);
        }

        return success;
    }

}
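// The doCheckAndPublish contract above collapses to this decision table (illustrative):
//
//   doCheck result   | doPublish runs? | resource usable afterwards?
//   -----------------+-----------------+---------------------------------------
//   EXISTS           | no              | yes (marked CLEAN)
//   DOES_NOT_EXIST   | yes             | only if doPublish returns true
//   ERROR            | no              | no (stays DIRTY until the next check)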
@@ -0,0 +1,65 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.client.RestClient;

import java.util.Locale;

/**
 * {@code Scheme} provides the list of supported {@code URI} schemes (aka protocols) for working with Elasticsearch via the
 * {@link RestClient}.
 *
 * @see HttpHostBuilder
 */
public enum Scheme {

    /**
     * HTTP is the default {@linkplain Scheme scheme} used by Elasticsearch.
     */
    HTTP("http"),
    /**
     * HTTPS is the secure form of {@linkplain #HTTP http}, which requires that Elasticsearch be using X-Pack Security with TLS/SSL or
     * a similar securing mechanism.
     */
    HTTPS("https");

    private final String scheme;

    Scheme(final String scheme) {
        this.scheme = scheme;
    }

    @Override
    public String toString() {
        return scheme;
    }

    /**
     * Determine the {@link Scheme} from the {@code scheme}.
     * <pre><code>
     * Scheme http = Scheme.fromString("http");
     * Scheme https = Scheme.fromString("https");
     * Scheme httpsCaps = Scheme.fromString("HTTPS"); // same as https
     * </code></pre>
     *
     * @param scheme The scheme to check.
     * @return Never {@code null}.
     * @throws NullPointerException if {@code scheme} is {@code null}.
     * @throws IllegalArgumentException if the {@code scheme} is not supported.
     */
    public static Scheme fromString(final String scheme) {
        switch (scheme.toLowerCase(Locale.ROOT)) {
            case "http":
                return HTTP;
            case "https":
                return HTTPS;
        }

        throw new IllegalArgumentException("unsupported scheme: [" + scheme + "]");
    }

}
@@ -0,0 +1,87 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
import java.util.Objects;

/**
 * {@code SecurityHttpClientConfigCallback} configures a {@link RestClient} for user authentication and SSL / TLS.
 */
class SecurityHttpClientConfigCallback implements RestClientBuilder.HttpClientConfigCallback {

    /**
     * The optional {@link CredentialsProvider} for all requests to enable user authentication.
     */
    @Nullable
    private final CredentialsProvider credentialsProvider;
    /**
     * The {@link SSLIOSessionStrategy} for all requests to enable SSL / TLS encryption.
     */
    private final SSLIOSessionStrategy sslStrategy;

    /**
     * Create a new {@link SecurityHttpClientConfigCallback}.
     *
     * @param credentialsProvider The credential provider, if a username/password have been supplied
     * @param sslStrategy The SSL strategy, if SSL / TLS have been supplied
     * @throws NullPointerException if {@code sslStrategy} is {@code null}
     */
    SecurityHttpClientConfigCallback(final SSLIOSessionStrategy sslStrategy,
                                     @Nullable final CredentialsProvider credentialsProvider) {
        this.sslStrategy = Objects.requireNonNull(sslStrategy);
        this.credentialsProvider = credentialsProvider;
    }

    /**
     * Get the {@link CredentialsProvider} that will be added to the HTTP client.
     *
     * @return Can be {@code null}.
     */
    @Nullable
    CredentialsProvider getCredentialsProvider() {
        return credentialsProvider;
    }

    /**
     * Get the {@link SSLIOSessionStrategy} that will be added to the HTTP client.
     *
     * @return Never {@code null}.
     */
    SSLIOSessionStrategy getSSLStrategy() {
        return sslStrategy;
    }

    /**
     * Sets the {@linkplain HttpAsyncClientBuilder#setSSLStrategy SSL strategy}, which supplies both the {@link SSLContext} and the
     * {@link HostnameVerifier}, and, if one was given, the
     * {@linkplain HttpAsyncClientBuilder#setDefaultCredentialsProvider(CredentialsProvider) credential provider}.
     *
     * @param httpClientBuilder The client to configure.
     * @return Always {@code httpClientBuilder}.
     */
    @Override
    public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) {
        // enable SSL / TLS
        httpClientBuilder.setSSLStrategy(sslStrategy);

        // enable user authentication
        if (credentialsProvider != null) {
            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
        }

        return httpClientBuilder;
    }

}
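// Illustrative, not part of this commit: plugging the callback into a RestClientBuilder.
// The credentials are hypothetical; getDefaultStrategy() is the JVM-default TLS setup.
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.RestClient;

class SecurityCallbackSketch {

    static RestClient build() {
        final BasicCredentialsProvider credentials = new BasicCredentialsProvider();
        credentials.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("elastic", "changeme"));

        return RestClient.builder(new HttpHost("localhost", 9200, "https"))
                         .setHttpClientConfigCallback(new SecurityHttpClientConfigCallback(
                                 SSLIOSessionStrategy.getDefaultStrategy(), credentials))
                         .build();
    }

}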
@@ -0,0 +1,85 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Objects;
import java.util.function.Supplier;

/**
 * {@code TemplateHttpResource}s allow the checking and uploading of templates to a remote cluster.
 * <p>
 * There is currently no need to check the response body of the template for consistency, but if we ever make a backwards-compatible change
 * that requires the template to be replaced, then we will need to check for <em>something</em> in the body in order to see if we need to
 * replace the existing template(s).
 */
public class TemplateHttpResource extends PublishableHttpResource {

    private static final Logger logger = Loggers.getLogger(TemplateHttpResource.class);

    /**
     * The name of the template that is sent to the remote cluster.
     */
    private final String templateName;
    /**
     * Provides a fully formed template (e.g., no variables that need to be replaced).
     */
    private final Supplier<String> template;

    /**
     * Create a new {@link TemplateHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param masterTimeout Master timeout to use with any request.
     * @param templateName The name of the template (e.g., ".template123").
     * @param template The template provider.
     */
    public TemplateHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
                                final String templateName, final Supplier<String> template) {
        super(resourceOwnerName, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);

        this.templateName = Objects.requireNonNull(templateName);
        this.template = Objects.requireNonNull(template);
    }

    /**
     * Determine if the current {@linkplain #templateName template} exists.
     */
    @Override
    protected CheckResponse doCheck(final RestClient client) {
        return checkForResource(client, logger,
                                "/_template", templateName, "monitoring template",
                                resourceOwnerName, "monitoring cluster");
    }

    /**
     * Publish the missing {@linkplain #templateName template}.
     */
    @Override
    protected boolean doPublish(final RestClient client) {
        return putResource(client, logger,
                           "/_template", templateName, this::templateToHttpEntity, "monitoring template",
                           resourceOwnerName, "monitoring cluster");
    }

    /**
     * Create a {@link HttpEntity} for the {@link #template}.
     *
     * @return Never {@code null}.
     */
    HttpEntity templateToHttpEntity() {
        return new StringEntity(template.get(), ContentType.APPLICATION_JSON);
    }

}
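// Illustrative, not part of this commit: the Supplier defers rendering the (potentially
// large) template body until a publish is actually needed. The owner name, timeout, and
// template name are hypothetical.
package org.elasticsearch.xpack.monitoring.exporter.http;

import java.util.function.Supplier;

import org.elasticsearch.common.unit.TimeValue;

class TemplateResourceSketch {

    static TemplateHttpResource monitoringTemplate(final Supplier<String> loader) {
        return new TemplateHttpResource("my-exporter", TimeValue.timeValueSeconds(30), ".monitoring-es", loader);
    }

}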
@@ -0,0 +1,74 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.client.config.RequestConfig.Builder;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

/**
 * {@code TimeoutRequestConfigCallback} enables the setting of connection-related timeouts for HTTP requests.
 */
class TimeoutRequestConfigCallback implements RestClientBuilder.RequestConfigCallback {

    @Nullable
    private final TimeValue connectTimeout;
    @Nullable
    private final TimeValue socketTimeout;

    /**
     * Create a new {@link TimeoutRequestConfigCallback}.
     *
     * @param connectTimeout The initial connection timeout, if any is supplied
     * @param socketTimeout The socket timeout, if any is supplied
     */
    TimeoutRequestConfigCallback(@Nullable final TimeValue connectTimeout, @Nullable final TimeValue socketTimeout) {
        assert connectTimeout != null || socketTimeout != null : "pointless to use with defaults";

        this.connectTimeout = connectTimeout;
        this.socketTimeout = socketTimeout;
    }

    /**
     * Get the initial connection timeout.
     *
     * @return Can be {@code null} for default (1 second).
     */
    @Nullable
    TimeValue getConnectTimeout() {
        return connectTimeout;
    }

    /**
     * Get the socket timeout.
     *
     * @return Can be {@code null} for default (10 seconds).
     */
    @Nullable
    TimeValue getSocketTimeout() {
        return socketTimeout;
    }

    /**
     * Sets the {@linkplain Builder#setConnectTimeout(int) connect timeout} and {@linkplain Builder#setSocketTimeout(int) socket timeout}.
     *
     * @param requestConfigBuilder The request to configure.
     * @return Always {@code requestConfigBuilder}.
     */
    @Override
    public Builder customizeRequestConfig(Builder requestConfigBuilder) {
        if (connectTimeout != null) {
            requestConfigBuilder.setConnectTimeout((int) connectTimeout.millis());
        }
        if (socketTimeout != null) {
            requestConfigBuilder.setSocketTimeout((int) socketTimeout.millis());
        }

        return requestConfigBuilder;
    }

}
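// Illustrative, not part of this commit: applying custom timeouts at client build time.
// The host and values are hypothetical; passing null for either timeout keeps its default.
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.unit.TimeValue;

class TimeoutSketch {

    static RestClient build() {
        return RestClient.builder(new HttpHost("localhost", 9200))
                         .setRequestConfigCallback(new TimeoutRequestConfigCallback(
                                 TimeValue.timeValueSeconds(5),    // connect timeout
                                 TimeValue.timeValueSeconds(60)))  // socket timeout
                         .build();
    }

}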
@@ -0,0 +1,105 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

/**
 * {@code VersionHttpResource} verifies that the returned {@link Version} of Elasticsearch is at least the specified minimum version.
 */
public class VersionHttpResource extends HttpResource {

    private static final Logger logger = Loggers.getLogger(VersionHttpResource.class);

    /**
     * The parameters to pass with every version request to limit the output to just the version number.
     */
    public static final Map<String, String> PARAMETERS = Collections.singletonMap("filter_path", "version.number");

    /**
     * The minimum supported version of Elasticsearch.
     */
    private final Version minimumVersion;

    /**
     * Create a new {@link VersionHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param minimumVersion The minimum supported version of Elasticsearch.
     */
    public VersionHttpResource(final String resourceOwnerName, final Version minimumVersion) {
        super(resourceOwnerName);

        this.minimumVersion = Objects.requireNonNull(minimumVersion);
    }

    /**
     * Verify that the remote cluster is running at least the minimum {@link Version}.
     * <p>
     * If it is not, then there is nothing that can be done except wait until it is. There is no publishing aspect to this operation.
     */
    @Override
    protected boolean doCheckAndPublish(final RestClient client) {
        logger.trace("checking [{}] to ensure that it supports the minimum version [{}]", resourceOwnerName, minimumVersion);

        try {
            return validateVersion(client.performRequest("GET", "/", PARAMETERS));
        } catch (IOException | RuntimeException e) {
            logger.error(
                    (Supplier<?>) () ->
                            new ParameterizedMessage("failed to verify minimum version [{}] on the [{}] monitoring cluster",
                                                     minimumVersion, resourceOwnerName),
                    e);
        }

        return false;
    }

    /**
     * Ensure that the {@code response} contains a {@link Version} that is {@linkplain Version#onOrAfter(Version) on or after} the
     * {@link #minimumVersion}.
     *
     * @param response The response to parse.
     * @return {@code true} if the remote cluster is running a supported version.
     * @throws NullPointerException if the response is malformed.
     * @throws ClassCastException if the response is malformed.
     * @throws IOException if any parsing issue occurs.
     */
    private boolean validateVersion(final Response response) throws IOException {
        boolean supported = false;

        try (final XContentParser parser = XContentType.JSON.xContent().createParser(response.getEntity().getContent())) {
            // the response should be filtered to just '{"version":{"number":"xyz"}}', so this is cheap and guaranteed
            @SuppressWarnings("unchecked")
            final String versionNumber = (String) ((Map<String, Object>) parser.map().get("version")).get("number");
            final Version version = Version.fromString(versionNumber);

            if (version.onOrAfter(minimumVersion)) {
                logger.debug("version [{}] >= [{}] and supported for [{}]", version, minimumVersion, resourceOwnerName);

                supported = true;
            } else {
                logger.error("version [{}] < [{}] and NOT supported for [{}]", version, minimumVersion, resourceOwnerName);
            }
        }

        return supported;
    }

}
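// With PARAMETERS applied, the root endpoint responds with only (illustrative version value):
//
//   {"version":{"number":"5.0.0"}}
//
// which is why validateVersion can safely treat parser.map().get("version") as a Map and read
// its "number" entry directly.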
@@ -7,6 +7,8 @@ package org.elasticsearch.xpack.monitoring.exporter.local;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;

@@ -23,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

@@ -54,6 +57,8 @@ import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString;
 */
public class LocalExporter extends Exporter implements ClusterStateListener, CleanerService.Listener {

    private static final Logger logger = Loggers.getLogger(LocalExporter.class);

    public static final String TYPE = "local";

    private final InternalClient client;

@@ -104,7 +109,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
    @Override
    public void doClose() {
        if (state.getAndSet(State.TERMINATED) != State.TERMINATED) {
            logger.debug("stopped");
            logger.trace("stopped");
            clusterService.remove(this);
            cleanerService.remove(this);
        }
@@ -11,11 +11,13 @@ import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.license.License;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterInfoMonitoringDoc;
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<ClusterInfoMonitoringDoc> {

@@ -34,27 +36,38 @@ public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<Cluste
    @Override
    protected void buildXContent(ClusterInfoMonitoringDoc document, XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.field(Fields.CLUSTER_NAME, document.getClusterName());
        builder.field(Fields.VERSION, document.getVersion());
        builder.field("cluster_name", document.getClusterName());
        builder.field("version", document.getVersion());

        License license = document.getLicense();
        final License license = document.getLicense();
        if (license != null) {
            builder.startObject(Fields.LICENSE);
            builder.startObject("license");
            Map<String, String> extraParams = new MapBuilder<String, String>()
                    .put(License.REST_VIEW_MODE, "true")
                    .map();
            params = new ToXContent.DelegatingMapParams(extraParams, params);
            license.toInnerXContent(builder, params);
            builder.field(Fields.HKEY, hash(license, document.getClusterUUID()));
            builder.field("hkey", hash(license, document.getClusterUUID()));
            builder.endObject();
        }

        builder.startObject(Fields.CLUSTER_STATS);
        ClusterStatsResponse clusterStats = document.getClusterStats();
        final ClusterStatsResponse clusterStats = document.getClusterStats();
        if (clusterStats != null) {
            builder.startObject("cluster_stats");
            clusterStats.toXContent(builder, params);
            builder.endObject();
        }

        final List<XPackFeatureSet.Usage> usages = document.getUsage();
        if (usages != null) {
            // in the future we may choose to add other usages under the stack_stats section, but it is only xpack for now
            // it may also be combined on the UI side of phone-home to add things like "kibana" and "logstash" under "stack_stats"
            builder.startObject("stack_stats").startObject("xpack");
            for (final XPackFeatureSet.Usage usage : usages) {
                builder.field(usage.name(), usage);
            }
            builder.endObject().endObject();
        }
        builder.endObject();
    }

    public static String hash(License license, String clusterName) {

@@ -66,15 +79,4 @@ public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<Cluste
        return MessageDigests.toHexString(MessageDigests.sha256().digest(toHash.getBytes(StandardCharsets.UTF_8)));
    }

    static final class Fields {
        static final String CLUSTER_NAME = "cluster_name";
        static final String LICENSE = "license";
        static final String VERSION = "version";
        static final String CLUSTER_STATS = "cluster_stats";

        static final String HKEY = "hkey";

        static final String UID = "uid";
        static final String TYPE = "type";
    }
}
@@ -98,7 +98,16 @@ public class NodeStatsResolver extends MonitoringIndexNameResolver.Timestamped<N
                "node_stats.thread_pool.search.rejected",
                "node_stats.thread_pool.watcher.threads",
                "node_stats.thread_pool.watcher.queue",
                "node_stats.thread_pool.watcher.rejected");
                "node_stats.thread_pool.watcher.rejected",
                // Linux Only (at least for now)
                // Disk Info
                "node_stats.fs.data.spins",
                // Node IO Stats
                "node_stats.fs.io_stats.total.operations",
                "node_stats.fs.io_stats.total.read_operations",
                "node_stats.fs.io_stats.total.write_operations",
                "node_stats.fs.io_stats.total.read_kilobytes",
                "node_stats.fs.io_stats.total.write_kilobytes");
        FILTERS = Collections.unmodifiableSet(filters);
    }

@@ -1,47 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.support;

import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;

import java.nio.charset.Charset;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 *
 */
public final class VersionUtils {

    public static final String VERSION_NUMBER_FIELD = "number";

    private VersionUtils() {
    }

    public static Version parseVersion(byte[] text) {
        return parseVersion(VERSION_NUMBER_FIELD, new String(text, Charset.forName("UTF-8")));
    }

    /**
     * Extract & parse the version contained in the given template
     */
    public static Version parseVersion(String prefix, byte[] text) {
        return parseVersion(prefix, new String(text, Charset.forName("UTF-8")));
    }

    public static Version parseVersion(String prefix, String text) {
        Pattern pattern = Pattern.compile(prefix + "\"\\s*:\\s*\"?([0-9a-zA-Z\\.\\-]+)\"?");
        Matcher matcher = pattern.matcher(text);
        if (matcher.find()) {
            String parsedVersion = matcher.group(1);
            if (Strings.hasText(parsedVersion)) {
                return Version.fromString(parsedVersion);
            }
        }
        return null;
    }
}
@@ -556,7 +556,38 @@
        }
      },
      "fs": {
        "type": "object"
        "properties": {
          "data": {
            "properties": {
              "spins": {
                "type": "boolean"
              }
            }
          },
          "io_stats": {
            "properties": {
              "total": {
                "properties": {
                  "operations": {
                    "type": "long"
                  },
                  "read_operations": {
                    "type": "long"
                  },
                  "write_operations": {
                    "type": "long"
                  },
                  "read_kilobytes": {
                    "type": "long"
                  },
                  "write_kilobytes": {
                    "type": "long"
                  }
                }
              }
            }
          }
        }
      },
      "os": {
        "type": "object"
@@ -8,7 +8,6 @@ package org.elasticsearch.xpack.monitoring;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.is;

@@ -21,7 +20,7 @@ public class MonitoringPluginClientTests extends ESTestCase {
                .put(Client.CLIENT_TYPE_SETTING_S.getKey(), TransportClient.CLIENT_TYPE)
                .build();

        Monitoring plugin = new Monitoring(settings, new Environment(settings), null);
        Monitoring plugin = new Monitoring(settings, null);
        assertThat(plugin.isEnabled(), is(true));
        assertThat(plugin.isTransportClient(), is(true));
    }

@@ -32,7 +31,7 @@ public class MonitoringPluginClientTests extends ESTestCase {
                .put("path.home", createTempDir())
                .put(Client.CLIENT_TYPE_SETTING_S.getKey(), "node")
                .build();
        Monitoring plugin = new Monitoring(settings, new Environment(settings), null);
        Monitoring plugin = new Monitoring(settings, null);
        assertThat(plugin.isEnabled(), is(true));
        assertThat(plugin.isTransportClient(), is(false));
    }
@@ -67,7 +67,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
        doExporting();

        logger.debug("--> templates do not exist: they should have been created in the current version");
        for (String template : monitoringTemplates().keySet()) {
        for (String template : monitoringTemplateNames()) {
            assertTemplateExists(template);
        }
        assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);

@@ -93,7 +93,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
        assertTemplateExists(indexTemplateName());

        logger.debug("--> existing templates are old: new templates should be created");
        for (String template : monitoringTemplates().keySet()) {
        for (String template : monitoringTemplateNames()) {
            assertTemplateExists(template);
        }
        assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);

@@ -115,7 +115,7 @@ public abstract class AbstractExporterTemplateTestCase extends MonitoringIntegTe
        doExporting();

        logger.debug("--> existing templates are up to date");
        for (String template : monitoringTemplates().keySet()) {
        for (String template : monitoringTemplateNames()) {
            assertTemplateExists(template);
        }
        assertPipelineExists(Exporter.EXPORT_PIPELINE_NAME);
@ -0,0 +1,221 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.monitoring.exporter.http;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.RequestLine;
|
||||
import org.apache.http.StatusLine;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
/**
|
||||
* Base test helper for any {@link PublishableHttpResource}.
|
||||
*/
|
||||
public abstract class AbstractPublishableHttpResourceTestCase extends ESTestCase {
|
||||
|
||||
protected final String owner = getClass().getSimpleName();
|
||||
@Nullable
|
||||
protected final TimeValue masterTimeout = randomFrom(TimeValue.timeValueMinutes(5), null);
|
||||
|
||||
protected final RestClient client = mock(RestClient.class);
|
||||
|
||||
/**
|
||||
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} and assert that it returns
|
||||
* {@code true} given a {@link RestStatus} that is {@link RestStatus#OK}.
|
||||
*
|
||||
* @param resource The resource to execute.
|
||||
* @param resourceBasePath The base endpoint (e.g., "/_template")
|
||||
* @param resourceName The resource name (e.g., the template or pipeline name).
|
||||
*/
|
||||
protected void assertCheckExists(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName)
|
||||
throws IOException {
|
||||
doCheckWithStatusCode(resource, resourceBasePath, resourceName, successfulCheckStatus(), CheckResponse.EXISTS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} and assert that it returns
|
||||
* {@code false} given a {@link RestStatus} that is not {@link RestStatus#OK}.
|
||||
*
|
||||
* @param resource The resource to execute.
|
||||
* @param resourceBasePath The base endpoint (e.g., "/_template")
|
||||
* @param resourceName The resource name (e.g., the template or pipeline name).
|
||||
*/
|
||||
protected void assertCheckDoesNotExist(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName)
|
||||
throws IOException {
|
||||
doCheckWithStatusCode(resource, resourceBasePath, resourceName, notFoundCheckStatus(), CheckResponse.DOES_NOT_EXIST);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform {@link PublishableHttpResource#doCheck(RestClient) doCheck} against the {@code resource} that throws an exception and assert
|
||||
* that it returns {@code false}.
|
||||
*
|
||||
* @param resource The resource to execute.
|
||||
* @param resourceBasePath The base endpoint (e.g., "/_template")
|
||||
* @param resourceName The resource name (e.g., the template or pipeline name).
|
||||
*/
|
||||
protected void assertCheckWithException(final PublishableHttpResource resource,
|
||||
final String resourceBasePath, final String resourceName)
|
||||
throws IOException {
|
||||
final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
|
||||
final ResponseException responseException = responseException("GET", endpoint, failedCheckStatus());
|
||||
final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
|
||||
|
||||
when(client.performRequest("GET", endpoint, resource.getParameters())).thenThrow(e);
|
||||
|
||||
assertThat(resource.doCheck(client), is(CheckResponse.ERROR));
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} and assert that it returns
|
||||
* {@code true} given a {@link RestStatus} that is {@link RestStatus#OK} or {@link RestStatus#CREATED}.
|
||||
*
|
||||
* @param resource The resource to execute.
|
||||
* @param resourceBasePath The base endpoint (e.g., "/_template")
|
||||
* @param resourceName The resource name (e.g., the template or pipeline name).
|
||||
 * @param bodyType The request body provider's type.
 */
protected void assertPublishSucceeds(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
                                     final Class<? extends HttpEntity> bodyType)
        throws IOException {
    doPublishWithStatusCode(resource, resourceBasePath, resourceName, bodyType, successfulPublishStatus(), true);
}

/**
 * Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} and assert that it returns
 * {@code false} given a {@link RestStatus} that is neither {@link RestStatus#OK} nor {@link RestStatus#CREATED}.
 *
 * @param resource The resource to execute.
 * @param resourceBasePath The base endpoint (e.g., "/_template")
 * @param resourceName The resource name (e.g., the template or pipeline name).
 * @param bodyType The request body provider's type.
 */
protected void assertPublishFails(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
                                  final Class<? extends HttpEntity> bodyType)
        throws IOException {
    doPublishWithStatusCode(resource, resourceBasePath, resourceName, bodyType, failedPublishStatus(), false);
}

/**
 * Perform {@link PublishableHttpResource#doPublish(RestClient) doPublish} against the {@code resource} that throws an exception and
 * assert that it returns {@code false}.
 *
 * @param resource The resource to execute.
 * @param resourceBasePath The base endpoint (e.g., "/_template")
 * @param resourceName The resource name (e.g., the template or pipeline name).
 * @param bodyType The request body provider's type.
 */
protected void assertPublishWithException(final PublishableHttpResource resource,
                                          final String resourceBasePath, final String resourceName,
                                          final Class<? extends HttpEntity> bodyType)
        throws IOException {
    final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
    final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"));

    when(client.performRequest(eq("PUT"), eq(endpoint), eq(resource.getParameters()), any(bodyType))).thenThrow(e);

    assertThat(resource.doPublish(client), is(false));
}

protected void assertParameters(final PublishableHttpResource resource) {
    final Map<String, String> parameters = resource.getParameters();

    if (masterTimeout != null) {
        assertThat(parameters.get("master_timeout"), is(masterTimeout.toString()));
    }

    assertThat(parameters.get("filter_path"), is("$NONE"));
}

private void doCheckWithStatusCode(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
                                   final RestStatus status,
                                   final CheckResponse expected)
        throws IOException {
    final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
    final Response response = response("GET", endpoint, status);

    when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);

    assertThat(resource.doCheck(client), is(expected));
}

private void doPublishWithStatusCode(final PublishableHttpResource resource, final String resourceBasePath, final String resourceName,
                                     final Class<? extends HttpEntity> bodyType,
                                     final RestStatus status,
                                     final boolean expected)
        throws IOException {
    final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
    final Response response = response("GET", endpoint, status);

    when(client.performRequest(eq("PUT"), eq(endpoint), eq(resource.getParameters()), any(bodyType))).thenReturn(response);

    assertThat(resource.doPublish(client), is(expected));
}

protected RestStatus successfulCheckStatus() {
    return RestStatus.OK;
}

protected RestStatus notFoundCheckStatus() {
    return RestStatus.NOT_FOUND;
}

protected RestStatus failedCheckStatus() {
    final Predicate<RestStatus> ignoreStatus = (final RestStatus status) -> status == RestStatus.OK || status == RestStatus.NOT_FOUND;
    return randomValueOtherThanMany(ignoreStatus, () -> randomFrom(RestStatus.values()));
}

protected RestStatus successfulPublishStatus() {
    return randomFrom(RestStatus.OK, RestStatus.CREATED);
}

protected RestStatus failedPublishStatus() {
    final Predicate<RestStatus> ignoreStatus = (final RestStatus status) -> status == RestStatus.OK || status == RestStatus.CREATED;
    return randomValueOtherThanMany(ignoreStatus, () -> randomFrom(RestStatus.values()));
}

protected String concatenateEndpoint(final String resourceBasePath, final String resourceName) {
    return resourceBasePath + "/" + resourceName;
}

protected Response response(final String method, final String endpoint, final RestStatus status) {
    final Response response = mock(Response.class);
    // fill out the response enough so that the exception can be constructed
    final RequestLine requestLine = mock(RequestLine.class);
    when(requestLine.getMethod()).thenReturn(method);
    when(requestLine.getUri()).thenReturn(endpoint);
    final StatusLine statusLine = mock(StatusLine.class);
    when(statusLine.getStatusCode()).thenReturn(status.getStatus());

    when(response.getRequestLine()).thenReturn(requestLine);
    when(response.getStatusLine()).thenReturn(statusLine);

    return response;
}

protected ResponseException responseException(final String method, final String endpoint, final RestStatus status) {
    try {
        return new ResponseException(response(method, endpoint, status));
    } catch (final IOException e) {
        throw new IllegalStateException("update responseException to properly build the ResponseException", e);
    }
}

}
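For orientation, a concrete subclass would exercise these helpers roughly as in the following sketch. This is illustrative only and not part of the commit: the class name, the "/_ingest/pipeline" base path, and the "my-pipeline" resource name are hypothetical stand-ins, the resource field stands in for whatever concrete PublishableHttpResource a real test constructs, and the same imports as the helpers above are assumed.

public class ExamplePublishableResourceTests extends AbstractPublishableHttpResourceTestCase {

    // hypothetical: a concrete test would construct the real resource under test here
    private final PublishableHttpResource resource = null;

    public void testDoPublishTrue() throws IOException {
        // a randomized OK/CREATED response to the PUT must be reported as success
        assertPublishSucceeds(resource, "/_ingest/pipeline", "my-pipeline", StringEntity.class);
    }

    public void testDoPublishFalse() throws IOException {
        // any status other than OK/CREATED must be reported as failure
        assertPublishFails(resource, "/_ingest/pipeline", "my-pipeline", StringEntity.class);
    }
}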
@ -0,0 +1,195 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicInteger;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
 * Tests {@link HttpExportBulkResponseListener}.
 */
public class HttpExportBulkResponseListenerTests extends ESTestCase {

    public void testOnSuccess() throws IOException {
        final Response response = mock(Response.class);
        final StringEntity entity = new StringEntity("{\"took\":5,\"errors\":false}", ContentType.APPLICATION_JSON);

        when(response.getEntity()).thenReturn(entity);

        // doesn't explode
        new WarningsHttpExporterBulkResponseListener().onSuccess(response);
    }

    public void testOnSuccessParsing() throws IOException {
        // {"took": 4, "errors": false, ...
        final Response response = mock(Response.class);
        final XContent xContent = mock(XContent.class);
        final XContentParser parser = mock(XContentParser.class);
        final HttpEntity entity = mock(HttpEntity.class);
        final InputStream stream = mock(InputStream.class);

        when(response.getEntity()).thenReturn(entity);
        when(entity.getContent()).thenReturn(stream);
        when(xContent.createParser(stream)).thenReturn(parser);

        // {, "took", 4, "errors", false
        when(parser.nextToken()).thenReturn(Token.START_OBJECT,
                Token.FIELD_NAME, Token.VALUE_NUMBER,
                Token.FIELD_NAME, Token.VALUE_BOOLEAN);
        when(parser.currentName()).thenReturn("took", "errors");
        when(parser.booleanValue()).thenReturn(false);

        new HttpExportBulkResponseListener(xContent).onSuccess(response);

        verify(parser, times(5)).nextToken();
        verify(parser, times(2)).currentName();
        verify(parser).booleanValue();
    }

    public void testOnSuccessWithInnerErrors() {
        final String[] expectedErrors = new String[] { randomAsciiOfLengthBetween(4, 10), randomAsciiOfLengthBetween(5, 9) };
        final AtomicInteger counter = new AtomicInteger(0);
        final Response response = mock(Response.class);
        final StringEntity entity = new StringEntity(
                "{\"took\":4,\"errors\":true,\"items\":[" +
                "{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"123\"}}," +
                "{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"456\"," +
                "\"error\":\"" + expectedErrors[0] + "\"}}," +
                "{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"789\"}}," +
                "{\"index\":{\"_index\":\".monitoring-data-2\",\"_type\":\"node\",\"_id\":\"012\"," +
                "\"error\":\"" + expectedErrors[1] + "\"}}" +
                "]}",
                ContentType.APPLICATION_JSON);

        when(response.getEntity()).thenReturn(entity);

        // doesn't explode
        new WarningsHttpExporterBulkResponseListener() {
            @Override
            void onItemError(final String text) {
                assertEquals(expectedErrors[counter.getAndIncrement()], text);
            }
        }.onSuccess(response);

        assertEquals(expectedErrors.length, counter.get());
    }

    public void testOnSuccessParsingWithInnerErrors() throws IOException {
        // {"took": 4, "errors": true, "items": [ { "index": { "_index": "ignored", "_type": "ignored", "_id": "ignored" },
        //                                         { "index": { "_index": "ignored", "_type": "ignored", "_id": "ignored", "error": "blah" }
        //                                       ]...
        final Response response = mock(Response.class);
        final XContent xContent = mock(XContent.class);
        final XContentParser parser = mock(XContentParser.class);
        final HttpEntity entity = mock(HttpEntity.class);
        final InputStream stream = mock(InputStream.class);

        when(response.getEntity()).thenReturn(entity);
        when(entity.getContent()).thenReturn(stream);
        when(xContent.createParser(stream)).thenReturn(parser);

        // {, "took", 4, "errors", false                        nextToken, currentName
        when(parser.nextToken()).thenReturn(Token.START_OBJECT, // 1
                Token.FIELD_NAME, Token.VALUE_NUMBER,           // 3, 1
                Token.FIELD_NAME, Token.VALUE_BOOLEAN,          // 5, 2
                Token.FIELD_NAME, Token.START_ARRAY,            // 7, 3
                // no error:
                Token.START_OBJECT,                             // 8
                Token.FIELD_NAME, Token.START_OBJECT,           // 10, 4
                Token.FIELD_NAME, Token.VALUE_STRING,           // 12, 5
                Token.FIELD_NAME, Token.VALUE_STRING,           // 14, 6
                Token.FIELD_NAME, Token.VALUE_STRING,           // 16, 7
                Token.END_OBJECT,                               // 17
                Token.START_OBJECT,                             // 18
                Token.FIELD_NAME, Token.START_OBJECT,           // 20, 8
                Token.FIELD_NAME, Token.VALUE_STRING,           // 22, 9
                Token.FIELD_NAME, Token.VALUE_STRING,           // 24, 10
                Token.FIELD_NAME, Token.VALUE_STRING,           // 26, 11
                Token.FIELD_NAME, Token.VALUE_STRING,           // 28, 12 ("error")
                Token.END_OBJECT,                               // 29
                Token.END_ARRAY);                               // 30
        when(parser.currentName()).thenReturn("took", "errors", "items",
                "index", "_index", "_type", "_id",
                "index", "_index", "_type", "_id", "error");
        // there were errors; so go diving for the error
        when(parser.booleanValue()).thenReturn(true);
        when(parser.text()).thenReturn("this is the error");

        new HttpExportBulkResponseListener(xContent).onSuccess(response);

        verify(parser, times(30)).nextToken();
        verify(parser, times(12)).currentName();
        verify(parser).booleanValue();
        verify(parser).text();
    }

    public void testOnSuccessMalformed() {
        final AtomicInteger counter = new AtomicInteger(0);
        final Response response = mock(Response.class);

        if (randomBoolean()) {
            // malformed JSON
            when(response.getEntity()).thenReturn(new StringEntity("{", ContentType.APPLICATION_JSON));
        }

        new WarningsHttpExporterBulkResponseListener() {
            @Override
            void onError(final String msg, final Throwable cause) {
                counter.getAndIncrement();
            }
        }.onSuccess(response);

        assertEquals(1, counter.get());
    }

    public void testOnFailure() {
        final Exception exception = randomBoolean() ? new Exception() : new RuntimeException();

        new WarningsHttpExporterBulkResponseListener() {
            @Override
            void onError(final String msg, final Throwable cause) {
                assertSame(exception, cause);
            }
        }.onFailure(exception);
    }

    private static class WarningsHttpExporterBulkResponseListener extends HttpExportBulkResponseListener {

        WarningsHttpExporterBulkResponseListener() {
            super(XContentType.JSON.xContent());
        }

        @Override
        void onItemError(final String msg) {
            fail("There should be no errors within the response!");
        }

        @Override
        void onError(final String msg, final Throwable cause) {
            super.onError(msg, cause); // let it log the exception so you can check the output

            fail("There should be no errors!");
        }

    }

}
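For reference, the same error-extraction path can be driven end to end with real JSON rather than mocked tokens, mirroring testOnSuccessWithInnerErrors above; a minimal sketch, assuming this test class's imports:

    final Response response = mock(Response.class);
    when(response.getEntity()).thenReturn(new StringEntity(
            "{\"took\":4,\"errors\":true,\"items\":[" +
            "{\"index\":{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"1\",\"error\":\"boom\"}}]}",
            ContentType.APPLICATION_JSON));

    // the listener walks items[*].error and hands "boom" to onItemError
    new HttpExportBulkResponseListener(XContentType.JSON.xContent()).onSuccess(response);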
@ -0,0 +1,590 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import okio.Buffer;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.xpack.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStateMonitoringDoc;
import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryMonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.monitoring.resolver.bulk.MonitoringBulkTimestampedResolver;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.joda.time.format.DateTimeFormat;

import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.FILTER_PATH_NONE;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;

@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
public class HttpExporterIT extends MonitoringIntegTestCase {

    private MockWebServerContainer webServerContainer;
    private MockWebServer webServer;

    @Before
    public void startWebServer() {
        webServerContainer = new MockWebServerContainer();
        webServer = webServerContainer.getWebServer();
    }

    @After
    public void stopWebServer() throws Exception {
        webServer.shutdown();
    }

    @Override
    protected boolean ignoreExternalCluster() {
        return true;
    }

    public void testExport() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        final Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());

        internalCluster().startNode(builder);

        final int nbDocs = randomIntBetween(1, 25);
        export(newRandomMonitoringDocs(nbDocs));

        assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
        assertBulk(webServer, nbDocs);
    }

    public void testExportWithHeaders() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();

        final String headerValue = randomAsciiOfLengthBetween(3, 9);
        final String[] array = generateRandomStringArray(2, 4, false);

        final Map<String, String[]> headers = new HashMap<>();

        headers.put("X-Cloud-Cluster", new String[] { headerValue });
        headers.put("X-Found-Cluster", new String[] { headerValue });
        headers.put("Array-Check", array);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress())
                .put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
                .put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
                .putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);

        internalCluster().startNode(builder);

        final int nbDocs = randomIntBetween(1, 25);
        export(newRandomMonitoringDocs(nbDocs));

        assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready, headers, null);
        assertBulk(webServer, nbDocs, headers, null);
    }

    public void testExportWithBasePath() throws Exception {
        final boolean useHeaders = randomBoolean();
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();

        final String headerValue = randomAsciiOfLengthBetween(3, 9);
        final String[] array = generateRandomStringArray(2, 4, false);

        final Map<String, String[]> headers = new HashMap<>();

        if (useHeaders) {
            headers.put("X-Cloud-Cluster", new String[] { headerValue });
            headers.put("X-Found-Cluster", new String[] { headerValue });
            headers.put("Array-Check", array);
        }

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false}");

        String basePath = "path/to";

        if (randomBoolean()) {
            basePath += "/something";

            if (rarely()) {
                basePath += "/proxied";
            }
        }

        if (randomBoolean()) {
            basePath = "/" + basePath;
        }

        final Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress())
                .put("xpack.monitoring.exporters._http.proxy.base_path", basePath + (randomBoolean() ? "/" : ""));

        if (useHeaders) {
            builder
                    .put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
                    .put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
                    .putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);
        }

        internalCluster().startNode(builder);

        final int nbDocs = randomIntBetween(1, 25);
        export(newRandomMonitoringDocs(nbDocs));

        assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready, headers, basePath);
        assertBulk(webServer, nbDocs, headers, basePath);
    }

    public void testHostChangeReChecksTemplate() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false}");

        internalCluster().startNode(builder);

        export(Collections.singletonList(newRandomMonitoringDoc()));

        assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
        assertBulk(webServer);

        try (final MockWebServerContainer secondWebServerContainer = new MockWebServerContainer(webServerContainer.getPort() + 1)) {
            final MockWebServer secondWebServer = secondWebServerContainer.getWebServer();

            assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
                    Settings.builder().putArray("xpack.monitoring.exporters._http.host", secondWebServerContainer.getFormattedAddress())));

            enqueueGetClusterVersionResponse(secondWebServer, Version.CURRENT);
            // pretend that one of the templates is missing
            for (Tuple<String, String> template : monitoringTemplates()) {
                if (template.v1().contains(MonitoringBulkTimestampedResolver.Data.DATA)) {
                    enqueueResponse(secondWebServer, 200, "template [" + template + "] exists");
                } else {
                    enqueueResponse(secondWebServer, 404, "template [" + template + "] does not exist");
                    enqueueResponse(secondWebServer, 201, "template [" + template + "] created");
                }
            }
            // opposite of whether it existed before
            enqueuePipelineResponses(secondWebServer, !pipelineExistsAlready);
            enqueueResponse(secondWebServer, 200, "{\"errors\": false}");

            logger.info("--> exporting a second event");
            export(Collections.singletonList(newRandomMonitoringDoc()));

            assertMonitorVersion(secondWebServer);

            for (Tuple<String, String> template : monitoringTemplates()) {
                RecordedRequest recordedRequest = secondWebServer.takeRequest();
                assertThat(recordedRequest.getMethod(), equalTo("GET"));
                assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.v1() + resourceQueryString()));

                if (template.v1().contains(MonitoringBulkTimestampedResolver.Data.DATA) == false) {
                    recordedRequest = secondWebServer.takeRequest();
                    assertThat(recordedRequest.getMethod(), equalTo("PUT"));
                    assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.v1() + resourceQueryString()));
                    assertThat(recordedRequest.getBody().readUtf8(), equalTo(template.v2()));
                }
            }
            assertMonitorPipelines(secondWebServer, !pipelineExistsAlready, null, null);
            assertBulk(secondWebServer);
        }
    }

    public void testUnsupportedClusterVersion() throws Exception {
        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());

        // returning an unsupported cluster version
        enqueueGetClusterVersionResponse(randomFrom(Version.fromString("0.18.0"), Version.fromString("1.0.0"),
                Version.fromString("1.4.0"), Version.fromString("2.4.0")));

        String agentNode = internalCluster().startNode(builder);

        // fire off what should be an unsuccessful request
        assertNull(getExporter(agentNode).openBulk());

        assertThat(webServer.getRequestCount(), equalTo(1));

        assertMonitorVersion(webServer);
    }

    public void testDynamicIndexFormatChange() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServerContainer.getFormattedAddress());

        internalCluster().startNode(builder);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        MonitoringDoc doc = newRandomMonitoringDoc();
        export(Collections.singletonList(doc));

        assertMonitorResources(webServer, templatesExistsAlready, pipelineExistsAlready);
        RecordedRequest recordedRequest = assertBulk(webServer);

        @SuppressWarnings("unchecked")
        String indexName = new ResolversRegistry(Settings.EMPTY).getResolver(doc).index(doc);

        byte[] bytes = recordedRequest.getBody().readByteArray();
        Map<String, Object> data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
        @SuppressWarnings("unchecked")
        Map<String, Object> index = (Map<String, Object>) data.get("index");
        assertThat(index.get("_index"), equalTo(indexName));

        String newTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM");
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
                .put("xpack.monitoring.exporters._http.index.name.time_format", newTimeFormat)));

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, true, true);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        doc = newRandomMonitoringDoc();
        export(Collections.singletonList(doc));

        String expectedMonitoringIndex = ".monitoring-es-" + MonitoringTemplateUtils.TEMPLATE_VERSION + "-"
                + DateTimeFormat.forPattern(newTimeFormat).withZoneUTC().print(doc.getTimestamp());

        assertMonitorResources(webServer, true, true);
        recordedRequest = assertBulk(webServer);

        bytes = recordedRequest.getBody().readByteArray();
        data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
        @SuppressWarnings("unchecked")
        final Map<String, Object> newIndex = (Map<String, Object>) data.get("index");
        assertThat(newIndex.get("_index"), equalTo(expectedMonitoringIndex));
    }

    private void assertMonitorVersion(final MockWebServer webServer) throws Exception {
        assertMonitorVersion(webServer, null, null);
    }

    private void assertMonitorVersion(final MockWebServer webServer,
                                      @Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
            throws Exception {
        final String pathPrefix = basePathToAssertablePrefix(basePath);
        final RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("GET"));
        assertThat(request.getPath(), equalTo(pathPrefix + "/?filter_path=version.number"));
        assertHeaders(request, customHeaders);
    }

    private void assertMonitorResources(final MockWebServer webServer,
                                        final boolean templateAlreadyExists, final boolean pipelineAlreadyExists)
            throws Exception {
        assertMonitorResources(webServer, templateAlreadyExists, pipelineAlreadyExists, null, null);
    }

    private void assertMonitorResources(final MockWebServer webServer,
                                        final boolean templateAlreadyExists, final boolean pipelineAlreadyExists,
                                        @Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
            throws Exception {
        assertMonitorVersion(webServer, customHeaders, basePath);
        assertMonitorTemplates(webServer, templateAlreadyExists, customHeaders, basePath);
        assertMonitorPipelines(webServer, pipelineAlreadyExists, customHeaders, basePath);
    }

    private void assertMonitorTemplates(final MockWebServer webServer, final boolean alreadyExists,
                                        @Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
            throws Exception {
        final String pathPrefix = basePathToAssertablePrefix(basePath);
        RecordedRequest request;

        for (Tuple<String, String> template : monitoringTemplates()) {
            request = webServer.takeRequest();

            assertThat(request.getMethod(), equalTo("GET"));
            assertThat(request.getPath(), equalTo(pathPrefix + "/_template/" + template.v1() + resourceQueryString()));
            assertHeaders(request, customHeaders);

            if (alreadyExists == false) {
                request = webServer.takeRequest();

                assertThat(request.getMethod(), equalTo("PUT"));
                assertThat(request.getPath(), equalTo(pathPrefix + "/_template/" + template.v1() + resourceQueryString()));
                assertThat(request.getBody().readUtf8(), equalTo(template.v2()));
                assertHeaders(request, customHeaders);
            }
        }
    }

    private void assertMonitorPipelines(final MockWebServer webServer, final boolean alreadyExists,
                                        @Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
            throws Exception {
        final String pathPrefix = basePathToAssertablePrefix(basePath);
        RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("GET"));
        assertThat(request.getPath(), equalTo(pathPrefix + "/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME + resourceQueryString()));
        assertHeaders(request, customHeaders);

        if (alreadyExists == false) {
            request = webServer.takeRequest();

            assertThat(request.getMethod(), equalTo("PUT"));
            assertThat(request.getPath(),
                    equalTo(pathPrefix + "/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME + resourceQueryString()));
            assertThat(request.getBody().readUtf8(), equalTo(Exporter.emptyPipeline(XContentType.JSON).string()));
            assertHeaders(request, customHeaders);
        }
    }

    private RecordedRequest assertBulk(final MockWebServer webServer) throws Exception {
        return assertBulk(webServer, -1);
    }

    private RecordedRequest assertBulk(final MockWebServer webServer, final int docs) throws Exception {
        return assertBulk(webServer, docs, null, null);
    }

    private RecordedRequest assertBulk(final MockWebServer webServer, final int docs,
                                       @Nullable final Map<String, String[]> customHeaders, @Nullable final String basePath)
            throws Exception {
        final String pathPrefix = basePathToAssertablePrefix(basePath);
        final RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("POST"));
        assertThat(request.getPath(), equalTo(pathPrefix + "/_bulk" + bulkQueryString()));
        assertHeaders(request, customHeaders);

        if (docs != -1) {
            assertBulkRequest(request.getBody(), docs);
        }

        return request;
    }

    private void assertHeaders(final RecordedRequest request, final Map<String, String[]> customHeaders) {
        if (customHeaders != null) {
            for (final Map.Entry<String, String[]> entry : customHeaders.entrySet()) {
                final String header = entry.getKey();
                final String[] values = entry.getValue();

                final List<String> headerValues = request.getHeaders().values(header);

                assertThat(header, headerValues, hasSize(values.length));
                assertThat(header, headerValues, containsInAnyOrder(values));
            }
        }
    }

    private void export(Collection<MonitoringDoc> docs) throws Exception {
        Exporters exporters = internalCluster().getInstance(Exporters.class);
        assertThat(exporters, notNullValue());

        // Wait for exporting bulks to be ready to export
        assertBusy(() -> exporters.forEach(exporter -> assertThat(exporter.openBulk(), notNullValue())));
        exporters.export(docs);
    }

    private HttpExporter getExporter(String nodeName) {
        Exporters exporters = internalCluster().getInstance(Exporters.class, nodeName);
        return (HttpExporter) exporters.iterator().next();
    }

    private MonitoringDoc newRandomMonitoringDoc() {
        if (randomBoolean()) {
            IndexRecoveryMonitoringDoc doc = new IndexRecoveryMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
            doc.setClusterUUID(internalCluster().getClusterName());
            doc.setTimestamp(System.currentTimeMillis());
            doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
            doc.setRecoveryResponse(new RecoveryResponse());
            return doc;
        } else {
            ClusterStateMonitoringDoc doc = new ClusterStateMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
            doc.setClusterUUID(internalCluster().getClusterName());
            doc.setTimestamp(System.currentTimeMillis());
            doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
            doc.setClusterState(ClusterState.PROTO);
            doc.setStatus(ClusterHealthStatus.GREEN);
            return doc;
        }
    }

    private List<MonitoringDoc> newRandomMonitoringDocs(int nb) {
        List<MonitoringDoc> docs = new ArrayList<>(nb);
        for (int i = 0; i < nb; i++) {
            docs.add(newRandomMonitoringDoc());
        }
        return docs;
    }

    private String basePathToAssertablePrefix(@Nullable final String basePath) {
        if (basePath == null) {
            return "";
        }

        return basePath.startsWith("/") == false ? "/" + basePath : basePath;
    }

    private String resourceQueryString() {
        return "?filter_path=" + urlEncode(FILTER_PATH_NONE);
    }

    private String bulkQueryString() {
        return "?pipeline=" + urlEncode(Exporter.EXPORT_PIPELINE_NAME) + "&filter_path=" + urlEncode("errors,items.*.error");
    }

    private String urlEncode(final String value) {
        try {
            return URLEncoder.encode(value, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            // whelp, our JVM is broken
            throw new RuntimeException(e);
        }
    }

    private void enqueueGetClusterVersionResponse(Version v) throws IOException {
        enqueueGetClusterVersionResponse(webServer, v);
    }

    private void enqueueGetClusterVersionResponse(MockWebServer mockWebServer, Version v) throws IOException {
        mockWebServer.enqueue(new MockResponse().setResponseCode(200).setBody(
                jsonBuilder()
                        .startObject().startObject("version").field("number", v.toString()).endObject().endObject().bytes()
                        .utf8ToString()));
    }

    private void enqueueTemplateAndPipelineResponses(final MockWebServer webServer,
                                                     final boolean templatesAlreadyExists, final boolean pipelineAlreadyExists)
            throws IOException {
        enqueueTemplateResponses(webServer, templatesAlreadyExists);
        enqueuePipelineResponses(webServer, pipelineAlreadyExists);
    }

    private void enqueueTemplateResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
        if (alreadyExists) {
            enqueueTemplateResponsesExistsAlready(webServer);
        } else {
            enqueueTemplateResponsesDoesNotExistYet(webServer);
        }
    }

    private void enqueueTemplateResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
        for (String template : monitoringTemplateNames()) {
            enqueueResponse(webServer, 404, "template [" + template + "] does not exist");
            enqueueResponse(webServer, 201, "template [" + template + "] created");
        }
    }

    private void enqueueTemplateResponsesExistsAlready(final MockWebServer webServer) throws IOException {
        for (String template : monitoringTemplateNames()) {
            enqueueResponse(webServer, 200, "template [" + template + "] exists");
        }
    }

    private void enqueuePipelineResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
        if (alreadyExists) {
            enqueuePipelineResponsesExistsAlready(webServer);
        } else {
            enqueuePipelineResponsesDoesNotExistYet(webServer);
        }
    }

    private void enqueuePipelineResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
        enqueueResponse(webServer, 404, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] does not exist");
        enqueueResponse(webServer, 201, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] created");
    }

    private void enqueuePipelineResponsesExistsAlready(final MockWebServer webServer) throws IOException {
        enqueueResponse(webServer, 200, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] exists");
    }

    private void enqueueResponse(int responseCode, String body) throws IOException {
        enqueueResponse(webServer, responseCode, body);
    }

    private void enqueueResponse(MockWebServer mockWebServer, int responseCode, String body) throws IOException {
        mockWebServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body));
    }

    private void assertBulkRequest(Buffer requestBody, int numberOfActions) throws Exception {
        BulkRequest bulkRequest = Requests.bulkRequest().add(new BytesArray(requestBody.readByteArray()), null, null);
        assertThat(bulkRequest.numberOfActions(), equalTo(numberOfActions));
        for (ActionRequest actionRequest : bulkRequest.requests()) {
            assertThat(actionRequest, instanceOf(IndexRequest.class));
        }
    }
}
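Taken together, the assertions in this test pin down the request sequence the exporter issues per export. As a reading aid only (base path and query strings elided; not part of the commit):

    // GET  /                                          -> cluster version check (filter_path=version.number)
    // GET  /_template/{template}                      -> once per monitoring template
    // PUT  /_template/{template}                      -> only when the GET returned 404
    // GET  /_ingest/pipeline/{EXPORT_PIPELINE_NAME}   -> pipeline check
    // PUT  /_ingest/pipeline/{EXPORT_PIPELINE_NAME}   -> only when the GET returned 404
    // POST /_bulk?pipeline=...&filter_path=errors,items.*.error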
@ -0,0 +1,382 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.startsWith;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

/**
 * Tests {@link HttpExporter} explicitly for its resource handling.
 */
public class HttpExporterResourceTests extends AbstractPublishableHttpResourceTestCase {

    private final int EXPECTED_TEMPLATES = 3;

    private final RestClient client = mock(RestClient.class);
    private final Response versionResponse = mock(Response.class);

    private final MultiHttpResource resources =
            HttpExporter.createResources(new Exporter.Config("_http", "http", Settings.EMPTY), new ResolversRegistry(Settings.EMPTY));

    public void testInvalidVersionBlocks() throws IOException {
        final HttpEntity entity = new StringEntity("{\"version\":{\"number\":\"unknown\"}}", ContentType.APPLICATION_JSON);

        when(versionResponse.getEntity()).thenReturn(entity);
        when(client.performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class))).thenReturn(versionResponse);

        assertTrue(resources.isDirty());
        assertFalse(resources.checkAndPublish(client));
        // ensure it didn't magically become clean
        assertTrue(resources.isDirty());

        verifyVersionCheck();
        verifyNoMoreInteractions(client);
    }

    public void testTemplateCheckBlocksAfterSuccessfulVersion() throws IOException {
        final Exception exception = failureGetException();
        final boolean firstSucceeds = randomBoolean();
        int expectedGets = 1;
        int expectedPuts = 0;

        whenValidVersionResponse();

        // failure in the middle of various templates being checked/published; suggests a node dropped
        if (firstSucceeds) {
            final boolean successfulFirst = randomBoolean();
            // -2 from one success + a necessary failure after it!
            final int extraPasses = randomIntBetween(0, EXPECTED_TEMPLATES - 2);
            final int successful = randomIntBetween(0, extraPasses);
            final int unsuccessful = extraPasses - successful;

            final Response first = successfulFirst ? successfulGetResponse() : unsuccessfulGetResponse();

            final List<Response> otherResponses = getResponses(successful, unsuccessful);

            // last check fails implies that N - 2 publishes succeeded!
            when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
                    .thenReturn(first, otherResponses.toArray(new Response[otherResponses.size()]))
                    .thenThrow(exception);
            whenSuccessfulPutTemplates(otherResponses.size() + 1);

            expectedGets += 1 + successful + unsuccessful;
            expectedPuts = (successfulFirst ? 0 : 1) + unsuccessful;
        } else {
            when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
                    .thenThrow(exception);
        }

        assertTrue(resources.isDirty());
        assertFalse(resources.checkAndPublish(client));
        // ensure it didn't magically become clean
        assertTrue(resources.isDirty());

        verifyVersionCheck();
        verifyGetTemplates(expectedGets);
        verifyPutTemplates(expectedPuts);
        verifyNoMoreInteractions(client);
    }

    public void testTemplatePublishBlocksAfterSuccessfulVersion() throws IOException {
        final Exception exception = failurePutException();
        final boolean firstSucceeds = randomBoolean();
        int expectedGets = 1;
        int expectedPuts = 1;

        whenValidVersionResponse();

        // failure in the middle of various templates being checked/published; suggests a node dropped
        if (firstSucceeds) {
            final Response firstSuccess = successfulPutResponse();
            // -2 from one success + a necessary failure after it!
            final int extraPasses = randomIntBetween(0, EXPECTED_TEMPLATES - 2);
            final int successful = randomIntBetween(0, extraPasses);
            final int unsuccessful = extraPasses - successful;

            final List<Response> otherResponses = successfulPutResponses(unsuccessful);

            // first one passes for sure, so we need an extra "unsuccessful" GET
            whenGetTemplates(successful, unsuccessful + 2);

            // previous publishes must have succeeded
            when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
                    .thenReturn(firstSuccess, otherResponses.toArray(new Response[otherResponses.size()]))
                    .thenThrow(exception);

            // GETs required for each PUT attempt (first is guaranteed "unsuccessful")
            expectedGets += successful + unsuccessful + 1;
            // unsuccessful are PUT attempts + the guaranteed successful PUT (first)
            expectedPuts += unsuccessful + 1;
        } else {
            // fail the check so that it has to attempt the PUT
            whenGetTemplates(0, 1);

            when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
                    .thenThrow(exception);
        }

        assertTrue(resources.isDirty());
        assertFalse(resources.checkAndPublish(client));
        // ensure it didn't magically become clean
        assertTrue(resources.isDirty());

        verifyVersionCheck();
        verifyGetTemplates(expectedGets);
        verifyPutTemplates(expectedPuts);
        verifyNoMoreInteractions(client);
    }

    public void testPipelineCheckBlocksAfterSuccessfulTemplates() throws IOException {
        final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
        final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
        final Exception exception = failureGetException();

        whenValidVersionResponse();
        whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
        whenSuccessfulPutTemplates(EXPECTED_TEMPLATES);

        // we only expect a single pipeline for now
        when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
                .thenThrow(exception);

        assertTrue(resources.isDirty());
        assertFalse(resources.checkAndPublish(client));
        // ensure it didn't magically become clean
        assertTrue(resources.isDirty());

        verifyVersionCheck();
        verifyGetTemplates(EXPECTED_TEMPLATES);
        verifyPutTemplates(unsuccessfulGetTemplates);
        verifyGetPipelines(1);
        verifyPutPipelines(0);
        verifyNoMoreInteractions(client);
    }

    public void testPipelinePublishBlocksAfterSuccessfulTemplates() throws IOException {
        final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
        final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
        final Exception exception = failurePutException();

        whenValidVersionResponse();
        whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
        whenSuccessfulPutTemplates(EXPECTED_TEMPLATES);
        // pipeline can't be there
        whenGetPipelines(0, 1);

        // we only expect a single pipeline for now
        when(client.performRequest(eq("PUT"),
                startsWith("/_ingest/pipeline/"),
                anyMapOf(String.class, String.class),
                any(HttpEntity.class)))
                .thenThrow(exception);

        assertTrue(resources.isDirty());
        assertFalse(resources.checkAndPublish(client));
        // ensure it didn't magically become clean
        assertTrue(resources.isDirty());

        verifyVersionCheck();
        verifyGetTemplates(EXPECTED_TEMPLATES);
        verifyPutTemplates(unsuccessfulGetTemplates);
        verifyGetPipelines(1);
        verifyPutPipelines(1);
        verifyNoMoreInteractions(client);
    }

    public void testSuccessfulChecks() throws IOException {
        final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
        final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;
        final int successfulGetPipelines = randomIntBetween(0, 1);
        final int unsuccessfulGetPipelines = 1 - successfulGetPipelines;

        whenValidVersionResponse();
        whenGetTemplates(successfulGetTemplates, unsuccessfulGetTemplates);
        whenSuccessfulPutTemplates(unsuccessfulGetTemplates);
        whenGetPipelines(successfulGetPipelines, unsuccessfulGetPipelines);
        whenSuccessfulPutPipelines(1);

        assertTrue(resources.isDirty());

        // it should be able to proceed!
        assertTrue(resources.checkAndPublish(client));
        assertFalse(resources.isDirty());

        verifyVersionCheck();
        verifyGetTemplates(EXPECTED_TEMPLATES);
        verifyPutTemplates(unsuccessfulGetTemplates);
        verifyGetPipelines(1);
        verifyPutPipelines(unsuccessfulGetPipelines);
        verifyNoMoreInteractions(client);
    }

    private Exception failureGetException() {
        final ResponseException responseException = responseException("GET", "/_get_something", failedCheckStatus());

        return randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
    }

    private Exception failurePutException() {
        final ResponseException responseException = responseException("PUT", "/_put_something", failedPublishStatus());

        return randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);
    }

    private Response successfulGetResponse() {
        return response("GET", "/_get_something", successfulCheckStatus());
    }

    private Response unsuccessfulGetResponse() {
        return response("GET", "/_get_something", notFoundCheckStatus());
    }

    private List<Response> getResponses(final int successful, final int unsuccessful) {
        final List<Response> responses = new ArrayList<>(successful);

        for (int i = 0; i < successful; ++i) {
            responses.add(successfulGetResponse());
        }

        for (int i = 0; i < unsuccessful; ++i) {
            responses.add(unsuccessfulGetResponse());
        }

        return responses;
    }

    private Response successfulPutResponse() {
        final Response response = mock(Response.class);
        final StatusLine statusLine = mock(StatusLine.class);

        when(response.getStatusLine()).thenReturn(statusLine);
        when(statusLine.getStatusCode()).thenReturn(randomFrom(RestStatus.OK, RestStatus.CREATED).getStatus());

        return response;
    }

    private List<Response> successfulPutResponses(final int successful) {
        final List<Response> responses = new ArrayList<>(successful);

        for (int i = 0; i < successful; ++i) {
            responses.add(successfulPutResponse());
        }

        return responses;
    }

    private void whenValidVersionResponse() throws IOException {
        final HttpEntity entity = new StringEntity("{\"version\":{\"number\":\"" + Version.CURRENT + "\"}}", ContentType.APPLICATION_JSON);

        when(versionResponse.getEntity()).thenReturn(entity);
        when(client.performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class))).thenReturn(versionResponse);
    }

    private void whenGetTemplates(final int successful, final int unsuccessful) throws IOException {
        final List<Response> gets = getResponses(successful, unsuccessful);

        if (gets.size() == 1) {
            when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
                    .thenReturn(gets.get(0));
        } else {
            when(client.performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class)))
                    .thenReturn(gets.get(0), gets.subList(1, gets.size()).toArray(new Response[gets.size() - 1]));
        }
    }

    private void whenSuccessfulPutTemplates(final int successful) throws IOException {
        final List<Response> successfulPuts = successfulPutResponses(successful);

        // empty is possible if they all exist
        if (successful == 1) {
            when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
                    .thenReturn(successfulPuts.get(0));
        } else if (successful > 1) {
            when(client.performRequest(eq("PUT"), startsWith("/_template/"), anyMapOf(String.class, String.class), any(HttpEntity.class)))
                    .thenReturn(successfulPuts.get(0), successfulPuts.subList(1, successful).toArray(new Response[successful - 1]));
        }
    }

    private void whenGetPipelines(final int successful, final int unsuccessful) throws IOException {
        final List<Response> gets = getResponses(successful, unsuccessful);

        if (gets.size() == 1) {
            when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
                    .thenReturn(gets.get(0));
        } else {
            when(client.performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class)))
                    .thenReturn(gets.get(0), gets.subList(1, gets.size()).toArray(new Response[gets.size() - 1]));
        }
    }

    private void whenSuccessfulPutPipelines(final int successful) throws IOException {
        final List<Response> successfulPuts = successfulPutResponses(successful);

        // empty is possible if they all exist
        if (successful == 1) {
            when(client.performRequest(eq("PUT"),
                    startsWith("/_ingest/pipeline/"),
                    anyMapOf(String.class, String.class),
                    any(HttpEntity.class)))
                    .thenReturn(successfulPuts.get(0));
        } else if (successful > 1) {
            when(client.performRequest(eq("PUT"),
                    startsWith("/_ingest/pipeline/"),
                    anyMapOf(String.class, String.class),
                    any(HttpEntity.class)))
                    .thenReturn(successfulPuts.get(0), successfulPuts.subList(1, successful).toArray(new Response[successful - 1]));
        }
    }

    private void verifyVersionCheck() throws IOException {
        verify(client).performRequest(eq("GET"), eq("/"), anyMapOf(String.class, String.class));
    }

    private void verifyGetTemplates(final int called) throws IOException {
        verify(client, times(called)).performRequest(eq("GET"), startsWith("/_template/"), anyMapOf(String.class, String.class));
    }

    private void verifyPutTemplates(final int called) throws IOException {
        verify(client, times(called)).performRequest(eq("PUT"),     // method
                startsWith("/_template/"),                          // endpoint
                anyMapOf(String.class, String.class),               // parameters (e.g., timeout)
                any(HttpEntity.class));                             // raw template
    }

    private void verifyGetPipelines(final int called) throws IOException {
        verify(client, times(called)).performRequest(eq("GET"), startsWith("/_ingest/pipeline/"), anyMapOf(String.class, String.class));
    }

    private void verifyPutPipelines(final int called) throws IOException {
        verify(client, times(called)).performRequest(eq("PUT"),     // method
                startsWith("/_ingest/pipeline/"),                   // endpoint
                anyMapOf(String.class, String.class),               // parameters (e.g., timeout)
                any(HttpEntity.class));                             // raw pipeline
    }

}
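The dirty/clean contract enforced above amounts to ordered short-circuiting. A sketch of the idea, illustrative only since the real logic lives in MultiHttpResource and PublishableHttpResource:

    boolean checkAndPublishAll(final RestClient client, final List<HttpResource> orderedResources) {
        // version first, then each template, then the pipeline
        for (final HttpResource resource : orderedResources) {
            if (resource.checkAndPublish(client) == false) {
                return false; // the chain stays dirty; later resources are never touched
            }
        }
        return true; // clean until something marks it dirty again
    }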
@ -1,137 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.ssl.SSLService;

import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;

/**
 * Tests for {@link HttpExporter}.
 */
public class HttpExporterSimpleTests extends ESTestCase {

    private final Environment environment = mock(Environment.class);

    public void testExporterWithBlacklistedHeaders() {
        final String blacklistedHeader = randomFrom(HttpExporter.BLACKLISTED_HEADERS);
        final String expected = "[" + blacklistedHeader + "] cannot be overwritten via [xpack.monitoring.exporters._http.headers]";
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .put("xpack.monitoring.exporters._http.host", "http://localhost:9200")
                .put("xpack.monitoring.exporters._http.headers.abc", "xyz")
                .put("xpack.monitoring.exporters._http.headers." + blacklistedHeader, "value should not matter");

        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
        }

        final Exporter.Config config = createConfig("_http", builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> {
            new HttpExporter(config, environment, new SSLService(builder.build(), environment));
        });

        assertThat(exception.getMessage(), equalTo(expected));
    }

    public void testExporterWithEmptyHeaders() {
        final String name = randomFrom("abc", "ABC", "X-Flag");
        final String expected = "headers must have values, missing for setting [xpack.monitoring.exporters._http.headers." + name + "]";
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .put("xpack.monitoring.exporters._http.host", "localhost:9200")
                .put("xpack.monitoring.exporters._http.headers." + name, "");

        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
        }

        final Exporter.Config config = createConfig("_http", builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> {
            new HttpExporter(config, environment, new SSLService(builder.build(), environment));
        });

        assertThat(exception.getMessage(), equalTo(expected));
    }

    public void testExporterWithMissingHost() {
        // forgot host!
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);

        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.host", "");
        } else if (randomBoolean()) {
            builder.putArray("xpack.monitoring.exporters._http.host");
        } else if (randomBoolean()) {
            builder.putNull("xpack.monitoring.exporters._http.host");
        }
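        // Because each branch re-rolls randomBoolean(), there is also an implicit fourth case in
        // which no branch runs and the host setting is never written at all; all four variants
        // must fail with the same "missing required setting" error asserted below.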

        final Exporter.Config config = createConfig("_http", builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> {
            new HttpExporter(config, environment, new SSLService(builder.build(), environment));
        });

        assertThat(exception.getMessage(), equalTo("missing required setting [xpack.monitoring.exporters._http.host]"));
    }

    public void testExporterWithInvalidHost() {
        final String invalidHost = randomFrom("://localhost:9200", "gopher!://xyz.my.com");

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);

        // sometimes add a valid URL with it
        if (randomBoolean()) {
            if (randomBoolean()) {
                builder.putArray("xpack.monitoring.exporters._http.host", "localhost:9200", invalidHost);
            } else {
                builder.putArray("xpack.monitoring.exporters._http.host", invalidHost, "localhost:9200");
            }
        } else {
            builder.put("xpack.monitoring.exporters._http.host", invalidHost);
        }

        final Exporter.Config config = createConfig("_http", builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> {
            new HttpExporter(config, environment, new SSLService(builder.build(), environment));
        });

        assertThat(exception.getMessage(), equalTo("[xpack.monitoring.exporters._http.host] invalid host: [" + invalidHost + "]"));
    }

    public void testExporterWithHostOnly() {
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", "http://localhost:9200");

        final Exporter.Config config = createConfig("_http", builder.build());

        new HttpExporter(config, environment, new SSLService(builder.build(), environment));
    }

    /**
     * Create the {@link Exporter.Config} with the given name, and select those settings from {@code settings}.
     *
     * @param name The name of the exporter.
     * @param settings The settings to select the exporter's settings from
     * @return Never {@code null}.
     */
    private static Exporter.Config createConfig(String name, Settings settings) {
        return new Exporter.Config(name, HttpExporter.TYPE, Settings.EMPTY, settings.getAsSettings("xpack.monitoring.exporters." + name));
    }

}

@ -1,211 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import com.squareup.okhttp.mockwebserver.Dispatcher;
import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.exporter.AbstractExporterTemplateTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.junit.After;
import org.junit.Before;

import java.net.BindException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.core.Is.is;

public class HttpExporterTemplateTests extends AbstractExporterTemplateTestCase {

    private MockWebServer webServer;
    private MockServerDispatcher dispatcher;

    @Before
    public void startWebServer() throws Exception {
        for (int webPort = 9250; webPort < 9300; webPort++) {
            try {
                webServer = new MockWebServer();
                dispatcher = new MockServerDispatcher();
                webServer.setDispatcher(dispatcher);
                webServer.start(webPort);
                return;
            } catch (BindException be) {
                logger.warn("port [{}] was already in use trying next port", webPort);
            }
        }
        throw new ElasticsearchException("unable to find open port between 9250 and 9300");
    }

    @After
    public void stopWebServer() throws Exception {
        webServer.shutdown();
    }

    @Override
    protected Settings exporterSettings() {
        return Settings.builder()
                .put("type", "http")
                .put("host", webServer.getHostName() + ":" + webServer.getPort())
                .put("connection.keep_alive", false)
                .put(Exporter.INDEX_NAME_TIME_FORMAT_SETTING, "YYYY")
                .build();
    }

    @Override
    protected void deleteTemplates() throws Exception {
        dispatcher.templates.clear();
    }

    @Override
    protected void deletePipeline() throws Exception {
        dispatcher.pipelines.clear();
    }

    @Override
    protected void putTemplate(String name) throws Exception {
        dispatcher.templates.put(name, generateTemplateSource(name));
    }

    @Override
    protected void putPipeline(String name) throws Exception {
        dispatcher.pipelines.put(name, Exporter.emptyPipeline(XContentType.JSON).bytes());
    }
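    // Exporter.emptyPipeline(XContentType.JSON) yields the same canonical no-op pipeline body the
    // exporter publishes itself, so seeding the dispatcher with it simulates a pipeline that
    // already exists in exactly the expected form (the PUT assertions elsewhere in these tests
    // compare against the identical string).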

    @Override
    protected void assertTemplateExists(String name) throws Exception {
        assertThat("failed to find a template matching [" + name + "]", dispatcher.templates.containsKey(name), is(true));
    }

    @Override
    protected void assertPipelineExists(String name) throws Exception {
        assertThat("failed to find a pipeline matching [" + name + "]", dispatcher.pipelines.containsKey(name), is(true));
    }

    @Override
    protected void assertTemplateNotUpdated(String name) throws Exception {
        // Checks that no PUT Template request has been made
        assertThat(dispatcher.hasRequest("PUT", "/_template/" + name), is(false));

        // Checks that the current template exists
        assertThat(dispatcher.templates.containsKey(name), is(true));
    }

    @Override
    protected void assertPipelineNotUpdated(String name) throws Exception {
        // Checks that no PUT pipeline request has been made
        assertThat(dispatcher.hasRequest("PUT", "/_ingest/pipeline/" + name), is(false));

        // Checks that the current pipeline exists
        assertThat(dispatcher.pipelines.containsKey(name), is(true));
    }

    @Override
    protected void awaitIndexExists(String index) throws Exception {
        Runnable busy = () -> assertThat("could not find index " + index, dispatcher.hasIndex(index), is(true));
        assertBusy(busy, 10, TimeUnit.SECONDS);
    }
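    // assertBusy retries the assertion with increasing waits until it stops throwing or the
    // 10-second budget runs out, which is what makes waiting on the asynchronous bulk/index path
    // reliable here.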

    class MockServerDispatcher extends Dispatcher {

        private final MockResponse NOT_FOUND = newResponse(404, "");

        private final Set<String> requests = new HashSet<>();
        private final Map<String, BytesReference> templates = ConcurrentCollections.newConcurrentMap();
        private final Map<String, BytesReference> pipelines = ConcurrentCollections.newConcurrentMap();
        private final Set<String> indices = ConcurrentCollections.newConcurrentSet();

        @Override
        public MockResponse dispatch(RecordedRequest request) throws InterruptedException {
            final String requestLine = request.getRequestLine();
            requests.add(requestLine);

            // Cluster version
            if ("GET / HTTP/1.1".equals(requestLine)) {
                return newResponse(200, "{\"version\": {\"number\": \"" + Version.CURRENT.toString() + "\"}}");
            // Bulk
            } else if ("POST".equals(request.getMethod()) && request.getPath().startsWith("/_bulk")) {
                // Parse the bulk request and extract all index names
                try {
                    BulkRequest bulk = new BulkRequest();
                    byte[] source = request.getBody().readByteArray();
                    bulk.add(source, 0, source.length);
                    for (ActionRequest docRequest : bulk.requests()) {
                        if (docRequest instanceof IndexRequest) {
                            indices.add(((IndexRequest) docRequest).index());
                        }
                    }
                } catch (Exception e) {
                    return newResponse(500, e.getMessage());
                }
                return newResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");
            // Templates and Pipelines
            } else if ("GET".equals(request.getMethod()) || "PUT".equals(request.getMethod())) {
                final String[] paths = request.getPath().split("/");

                if (paths.length > 2) {
                    // Templates
                    if ("_template".equals(paths[1])) {
                        // _template/{name}
                        return newResponseForType(templates, request, paths[2]);
                    } else if ("_ingest".equals(paths[1])) {
                        // _ingest/pipeline/{name}
                        return newResponseForType(pipelines, request, paths[3]);
                    }
                }
            }
            return newResponse(500, "MockServerDispatcher does not support: " + request.getRequestLine());
        }

        private MockResponse newResponseForType(Map<String, BytesReference> type, RecordedRequest request, String name) {
            final boolean exists = type.containsKey(name);

            if ("GET".equals(request.getMethod())) {
                return exists ? newResponse(200, type.get(name).utf8ToString()) : NOT_FOUND;
            } else if ("PUT".equals(request.getMethod())) {
                type.put(name, new BytesArray(request.getMethod()));
                return exists ? newResponse(200, "updated") : newResponse(201, "created");
            }

            return newResponse(500, request.getMethod() + " " + request.getPath() + " is not supported");
        }

        MockResponse newResponse(int code, String body) {
            return new MockResponse().setResponseCode(code).setBody(body);
        }

        int countRequests(String method, String path) {
            int count = 0;
            for (String request : requests) {
                if (request.startsWith(method + " " + path)) {
                    count += 1;
                }
            }
            return count;
        }
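        // Note: requests is a HashSet of request lines, so identical repeated requests collapse
        // into a single entry; countRequests therefore counts distinct request lines, which is
        // sufficient for hasRequest but would undercount exact call totals.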

        boolean hasRequest(String method, String path) {
            return countRequests(method, path) > 0;
        }

        boolean hasIndex(String index) {
            return indices.contains(index);
        }
    }
}

@ -5,606 +5,422 @@
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import com.squareup.okhttp.mockwebserver.MockResponse;
import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.QueueDispatcher;
import com.squareup.okhttp.mockwebserver.RecordedRequest;
import okio.Buffer;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.xpack.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStateMonitoringDoc;
import org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryMonitoringDoc;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.bulk.MonitoringBulkTimestampedResolver;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.joda.time.format.DateTimeFormat;
import org.junit.After;
import org.junit.Before;
import org.elasticsearch.xpack.monitoring.exporter.Exporter.Config;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.ssl.SSLService;

import org.mockito.InOrder;

import java.io.IOException;
import java.net.BindException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.atMost;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;

/**
 * Tests {@link HttpExporter}.
 */
public class HttpExporterTests extends ESTestCase {

    private final SSLService sslService = mock(SSLService.class);

    public void testExporterWithBlacklistedHeaders() {
        final String blacklistedHeader = randomFrom(HttpExporter.BLACKLISTED_HEADERS);
        final String expected = "[" + blacklistedHeader + "] cannot be overwritten via [xpack.monitoring.exporters._http.headers]";
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .put("xpack.monitoring.exporters._http.host", "http://localhost:9200")
                .put("xpack.monitoring.exporters._http.headers.abc", "xyz")
                .put("xpack.monitoring.exporters._http.headers." + blacklistedHeader, "value should not matter");

@ESIntegTestCase.ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
public class HttpExporterTests extends MonitoringIntegTestCase {

    private int webPort;
    private MockWebServer webServer;

    @Before
    public void startWebservice() throws Exception {
        for (webPort = 9250; webPort < 9300; webPort++) {
            try {
                webServer = new MockWebServer();
                QueueDispatcher dispatcher = new QueueDispatcher();
                dispatcher.setFailFast(true);
                webServer.setDispatcher(dispatcher);
                webServer.start(webPort);
                return;
            } catch (BindException be) {
                logger.warn("port [{}] was already in use trying next port", webPort);
            }
        }
        throw new ElasticsearchException("unable to find open port between 9250 and 9300");
    }

    @After
    public void cleanup() throws Exception {
        webServer.shutdown();
    }

    private int expectedTemplateAndPipelineCalls(final boolean templateAlreadyExists, final boolean pipelineAlreadyExists) {
        return expectedTemplateCalls(templateAlreadyExists) + expectedPipelineCalls(pipelineAlreadyExists);
    }

    private int expectedTemplateCalls(final boolean alreadyExists) {
        return monitoringTemplates().size() * (alreadyExists ? 1 : 2);
    }

    private int expectedPipelineCalls(final boolean alreadyExists) {
        return alreadyExists ? 1 : 2;
    }

    private void assertMonitorVersion(final MockWebServer webServer) throws Exception {
        assertMonitorVersion(webServer, null);
    }

    private void assertMonitorVersion(final MockWebServer webServer, @Nullable final Map<String, String[]> customHeaders)
            throws Exception {
        RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("GET"));
        assertThat(request.getPath(), equalTo("/"));
        assertHeaders(request, customHeaders);
    }
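    // MockWebServer records requests in arrival order and takeRequest() hands them back FIFO, so
    // these assertion helpers must run in exactly the order the exporter issues its requests:
    // version check first, then templates, then pipelines, then bulk.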

    private void assertMonitorTemplatesAndPipeline(final MockWebServer webServer,
                                                   final boolean templateAlreadyExists, final boolean pipelineAlreadyExists)
            throws Exception {
        assertMonitorTemplatesAndPipeline(webServer, templateAlreadyExists, pipelineAlreadyExists, null);
    }

    private void assertMonitorTemplatesAndPipeline(final MockWebServer webServer,
                                                   final boolean templateAlreadyExists, final boolean pipelineAlreadyExists,
                                                   @Nullable final Map<String, String[]> customHeaders) throws Exception {
        assertMonitorVersion(webServer, customHeaders);
        assertMonitorTemplates(webServer, templateAlreadyExists, customHeaders);
        assertMonitorPipelines(webServer, pipelineAlreadyExists, customHeaders);
    }

    private void assertMonitorTemplates(final MockWebServer webServer, final boolean alreadyExists,
                                        @Nullable final Map<String, String[]> customHeaders) throws Exception {
        RecordedRequest request;

        for (Map.Entry<String, String> template : monitoringTemplates().entrySet()) {
            request = webServer.takeRequest();

            assertThat(request.getMethod(), equalTo("GET"));
            assertThat(request.getPath(), equalTo("/_template/" + template.getKey()));
            assertHeaders(request, customHeaders);

            if (alreadyExists == false) {
                request = webServer.takeRequest();

                assertThat(request.getMethod(), equalTo("PUT"));
                assertThat(request.getPath(), equalTo("/_template/" + template.getKey()));
                assertThat(request.getBody().readUtf8(), equalTo(template.getValue()));
                assertHeaders(request, customHeaders);
            }
        }
    }

    private void assertMonitorPipelines(final MockWebServer webServer, final boolean alreadyExists,
                                        @Nullable final Map<String, String[]> customHeaders) throws Exception {
        RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("GET"));
        assertThat(request.getPath(), equalTo("/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME));
        assertHeaders(request, customHeaders);

        if (alreadyExists == false) {
            request = webServer.takeRequest();

            assertThat(request.getMethod(), equalTo("PUT"));
            assertThat(request.getPath(), equalTo("/_ingest/pipeline/" + Exporter.EXPORT_PIPELINE_NAME));
            assertThat(request.getBody().readUtf8(), equalTo(Exporter.emptyPipeline(XContentType.JSON).string()));
            assertHeaders(request, customHeaders);
        }
    }

    private RecordedRequest assertBulk(final MockWebServer webServer) throws Exception {
        return assertBulk(webServer, -1);
    }

    private RecordedRequest assertBulk(final MockWebServer webServer, final int docs) throws Exception {
        return assertBulk(webServer, docs, null);
    }

    private RecordedRequest assertBulk(final MockWebServer webServer, final int docs, @Nullable final Map<String, String[]> customHeaders)
            throws Exception {
        RecordedRequest request = webServer.takeRequest();

        assertThat(request.getMethod(), equalTo("POST"));
        assertThat(request.getPath(), equalTo("/_bulk?pipeline=" + Exporter.EXPORT_PIPELINE_NAME));
        assertHeaders(request, customHeaders);

        if (docs != -1) {
            assertBulkRequest(request.getBody(), docs);
        }

        return request;
    }

    private void assertHeaders(final RecordedRequest request, final Map<String, String[]> customHeaders) {
        if (customHeaders != null) {
            for (final Map.Entry<String, String[]> entry : customHeaders.entrySet()) {
                final String header = entry.getKey();
                final String[] values = entry.getValue();

                final List<String> headerValues = request.getHeaders().values(header);

                assertThat(header, headerValues, hasSize(values.length));
                assertThat(header, headerValues, containsInAnyOrder(values));
            }
        }
    }

    public void testExport() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();
        final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false)
                .put("xpack.monitoring.exporters._http.update_mappings", false);

        internalCluster().startNode(builder);

        final int nbDocs = randomIntBetween(1, 25);
        export(newRandomMonitoringDocs(nbDocs));

        assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
        assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
        assertBulk(webServer, nbDocs);
    }

    public void testExportWithHeaders() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();
        final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);

        final String headerValue = randomAsciiOfLengthBetween(3, 9);
        final String[] array = generateRandomStringArray(2, 4, false);

        final Map<String, String[]> headers = new HashMap<>();

        headers.put("X-Cloud-Cluster", new String[] { headerValue });
        headers.put("X-Found-Cluster", new String[] { headerValue });
        headers.put("Array-Check", array);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false)
                .put("xpack.monitoring.exporters._http.update_mappings", false)
                .put("xpack.monitoring.exporters._http.headers.X-Cloud-Cluster", headerValue)
                .put("xpack.monitoring.exporters._http.headers.X-Found-Cluster", headerValue)
                .putArray("xpack.monitoring.exporters._http.headers.Array-Check", array);

        internalCluster().startNode(builder);

        final int nbDocs = randomIntBetween(1, 25);
        export(newRandomMonitoringDocs(nbDocs));

        assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
        assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
        assertBulk(webServer, nbDocs, headers);
    }

    public void testDynamicHostChange() {
        // disable exporting to be able to use invalid hosts
        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", "test0");

        String nodeName = internalCluster().startNode(builder);

        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
                .putArray("xpack.monitoring.exporters._http.host", "test1")));
        assertThat(getExporter(nodeName).hosts, arrayContaining("test1"));

        // wipes the non array settings
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
                .putArray("xpack.monitoring.exporters._http.host", "test2")
                .put("xpack.monitoring.exporters._http.host", "")));
        assertThat(getExporter(nodeName).hosts, arrayContaining("test2"));

        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
                .putArray("xpack.monitoring.exporters._http.host", "test3")));
        assertThat(getExporter(nodeName).hosts, arrayContaining("test3"));
    }

    public void testHostChangeReChecksTemplate() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();
        final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false)
                .put("xpack.monitoring.exporters._http.update_mappings", false);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        String agentNode = internalCluster().startNode(builder);

        HttpExporter exporter = getExporter(agentNode);
        assertThat(exporter.supportedClusterVersion, is(false));
        export(Collections.singletonList(newRandomMonitoringDoc()));

        assertThat(exporter.supportedClusterVersion, is(true));
        assertThat(webServer.getRequestCount(), equalTo(2 + expectedTemplateAndPipelineCalls));
        assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
        assertBulk(webServer);

        MockWebServer secondWebServer = null;
        int secondWebPort;

        try {
            final int expectedPipelineCalls = expectedPipelineCalls(!pipelineExistsAlready);

            for (secondWebPort = 9250; secondWebPort < 9300; secondWebPort++) {
                try {
                    secondWebServer = new MockWebServer();
                    QueueDispatcher dispatcher = new QueueDispatcher();
                    dispatcher.setFailFast(true);
                    secondWebServer.setDispatcher(dispatcher);
                    secondWebServer.start(secondWebPort);
                    break;
                } catch (BindException be) {
                    logger.warn("port [{}] was already in use trying next port", secondWebPort);
                }
            }

            assertNotNull("Unable to start the second mock web server", secondWebServer);

            assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
                    Settings.builder().putArray("xpack.monitoring.exporters._http.host",
                            secondWebServer.getHostName() + ":" + secondWebServer.getPort())).get());

            // a new exporter is created on update, so we need to re-fetch it
            exporter = getExporter(agentNode);

            enqueueGetClusterVersionResponse(secondWebServer, Version.CURRENT);
            for (String template : monitoringTemplates().keySet()) {
                if (template.contains(MonitoringBulkTimestampedResolver.Data.DATA)) {
                    enqueueResponse(secondWebServer, 200, "template [" + template + "] exists");
                } else {
                    enqueueResponse(secondWebServer, 404, "template [" + template + "] does not exist");
                    enqueueResponse(secondWebServer, 201, "template [" + template + "] created");
                }
            }
            enqueuePipelineResponses(secondWebServer, !pipelineExistsAlready);
            enqueueResponse(secondWebServer, 200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

            logger.info("--> exporting a second event");
            export(Collections.singletonList(newRandomMonitoringDoc()));

            assertThat(secondWebServer.getRequestCount(), equalTo(2 + monitoringTemplates().size() * 2 - 1 + expectedPipelineCalls));
            assertMonitorVersion(secondWebServer);

            for (Map.Entry<String, String> template : monitoringTemplates().entrySet()) {
                RecordedRequest recordedRequest = secondWebServer.takeRequest();
                assertThat(recordedRequest.getMethod(), equalTo("GET"));
                assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.getKey()));

                if (template.getKey().contains(MonitoringBulkTimestampedResolver.Data.DATA) == false) {
                    recordedRequest = secondWebServer.takeRequest();
                    assertThat(recordedRequest.getMethod(), equalTo("PUT"));
                    assertThat(recordedRequest.getPath(), equalTo("/_template/" + template.getKey()));
                    assertThat(recordedRequest.getBody().readUtf8(), equalTo(template.getValue()));
                }
            }
            assertMonitorPipelines(secondWebServer, !pipelineExistsAlready, null);
            assertBulk(secondWebServer);
        } finally {
            if (secondWebServer != null) {
                secondWebServer.shutdown();
            }
        }
    }

    public void testUnsupportedClusterVersion() throws Exception {
        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false);

        // returning an unsupported cluster version
        enqueueGetClusterVersionResponse(randomFrom(Version.fromString("0.18.0"), Version.fromString("1.0.0"),
                Version.fromString("1.4.0")));

        String agentNode = internalCluster().startNode(builder);

        HttpExporter exporter = getExporter(agentNode);
        assertThat(exporter.supportedClusterVersion, is(false));
        assertNull(exporter.openBulk());

        assertThat(exporter.supportedClusterVersion, is(false));
        assertThat(webServer.getRequestCount(), equalTo(1));

        assertMonitorVersion(webServer);
    }

    public void testDynamicIndexFormatChange() throws Exception {
        final boolean templatesExistsAlready = randomBoolean();
        final boolean pipelineExistsAlready = randomBoolean();
        final int expectedTemplateAndPipelineCalls = expectedTemplateAndPipelineCalls(templatesExistsAlready, pipelineExistsAlready);

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", webServer.getHostName() + ":" + webServer.getPort())
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false)
                .put("xpack.monitoring.exporters._http.update_mappings", false);

        String agentNode = internalCluster().startNode(builder);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, templatesExistsAlready, pipelineExistsAlready);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        HttpExporter exporter = getExporter(agentNode);

        MonitoringDoc doc = newRandomMonitoringDoc();
        export(Collections.singletonList(doc));

        final int expectedRequests = 2 + expectedTemplateAndPipelineCalls;
        assertThat(webServer.getRequestCount(), equalTo(expectedRequests));
        assertMonitorTemplatesAndPipeline(webServer, templatesExistsAlready, pipelineExistsAlready);
        RecordedRequest recordedRequest = assertBulk(webServer);

        String indexName = exporter.getResolvers().getResolver(doc).index(doc);

        byte[] bytes = recordedRequest.getBody().readByteArray();
        Map<String, Object> data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
        Map<String, Object> index = (Map<String, Object>) data.get("index");
        assertThat(index.get("_index"), equalTo(indexName));

        String newTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM");
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
                .put("xpack.monitoring.exporters._http.index.name.time_format", newTimeFormat)));

        enqueueGetClusterVersionResponse(Version.CURRENT);
        enqueueTemplateAndPipelineResponses(webServer, true, true);
        enqueueResponse(200, "{\"errors\": false, \"msg\": \"successful bulk request\"}");

        doc = newRandomMonitoringDoc();
        export(Collections.singletonList(doc));

        String expectedMonitoringIndex = ".monitoring-es-" + MonitoringTemplateUtils.TEMPLATE_VERSION + "-"
                + DateTimeFormat.forPattern(newTimeFormat).withZoneUTC().print(doc.getTimestamp());

        final int expectedTemplatesAndPipelineExists = expectedTemplateAndPipelineCalls(true, true);
        assertThat(webServer.getRequestCount(), equalTo(expectedRequests + 2 + expectedTemplatesAndPipelineExists));
        assertMonitorTemplatesAndPipeline(webServer, true, true);
        recordedRequest = assertBulk(webServer);

        bytes = recordedRequest.getBody().readByteArray();
        data = XContentHelper.convertToMap(new BytesArray(bytes), false).v2();
        index = (Map<String, Object>) data.get("index");
        assertThat(index.get("_index"), equalTo(expectedMonitoringIndex));
    }

    public void testLoadRemoteClusterVersion() throws IOException {
        final String host = webServer.getHostName() + ":" + webServer.getPort();

        Settings.Builder builder = Settings.builder()
                .put(MonitoringSettings.INTERVAL.getKey(), "-1")
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", host)
                .put("xpack.monitoring.exporters._http.connection.keep_alive", false);

        String agentNode = internalCluster().startNode(builder);
        HttpExporter exporter = getExporter(agentNode);

        enqueueGetClusterVersionResponse(Version.CURRENT);
        Version resolved = exporter.loadRemoteClusterVersion(host);
        assertTrue(resolved.equals(Version.CURRENT));

        final Version expected = randomFrom(Version.CURRENT, Version.V_2_0_0_beta1, Version.V_2_0_0_beta2, Version.V_2_0_0_rc1,
                Version.V_2_0_0, Version.V_2_1_0, Version.V_2_2_0, Version.V_2_3_0);
        enqueueGetClusterVersionResponse(expected);
        resolved = exporter.loadRemoteClusterVersion(host);
        assertTrue(resolved.equals(expected));
    }

    private void export(Collection<MonitoringDoc> docs) throws Exception {
        Exporters exporters = internalCluster().getInstance(Exporters.class);
        assertThat(exporters, notNullValue());

        // Wait for exporting bulks to be ready to export
        assertBusy(() -> exporters.forEach(exporter -> assertThat(exporter.openBulk(), notNullValue())));
        exporters.export(docs);
    }
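    // openBulk() returns null until the exporter has verified the remote side (cluster version,
    // templates, pipeline), so the assertBusy loop above is what guarantees the export(docs) call
    // actually ships documents instead of racing the resource checks.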

    private HttpExporter getExporter(String nodeName) {
        Exporters exporters = internalCluster().getInstance(Exporters.class, nodeName);
        return (HttpExporter) exporters.iterator().next();
    }

    private MonitoringDoc newRandomMonitoringDoc() {
        if (randomBoolean()) {
            IndexRecoveryMonitoringDoc doc = new IndexRecoveryMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
            doc.setClusterUUID(internalCluster().getClusterName());
            doc.setTimestamp(System.currentTimeMillis());
            doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
            doc.setRecoveryResponse(new RecoveryResponse());
            return doc;
            builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
        }

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(), equalTo(expected));
    }

    public void testExporterWithEmptyHeaders() {
        final String name = randomFrom("abc", "ABC", "X-Flag");
        final String expected = "headers must have values, missing for setting [xpack.monitoring.exporters._http.headers." + name + "]";
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .put("xpack.monitoring.exporters._http.host", "localhost:9200")
                .put("xpack.monitoring.exporters._http.headers." + name, "");

        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.headers.xyz", "abc");
        }

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(), equalTo(expected));
    }

    public void testExporterWithPasswordButNoUsername() {
        final String expected =
                "[xpack.monitoring.exporters._http.auth.password] without [xpack.monitoring.exporters._http.auth.username]";
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .put("xpack.monitoring.exporters._http.host", "localhost:9200")
                .put("xpack.monitoring.exporters._http.auth.password", "_pass");

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(), equalTo(expected));
    }

    public void testExporterWithMissingHost() {
        // forgot host!
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);

        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.host", "");
        } else if (randomBoolean()) {
            builder.putArray("xpack.monitoring.exporters._http.host");
        } else if (randomBoolean()) {
            builder.putNull("xpack.monitoring.exporters._http.host");
        }

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(), equalTo("missing required setting [xpack.monitoring.exporters._http.host]"));
    }

    public void testExporterWithInconsistentSchemes() {
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
                .putArray("xpack.monitoring.exporters._http.host", "http://localhost:9200", "https://localhost:9201");

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(),
                equalTo("[xpack.monitoring.exporters._http.host] must use a consistent scheme: http or https"));
    }

    public void testExporterWithInvalidHost() {
        final String invalidHost = randomFrom("://localhost:9200", "gopher!://xyz.my.com");

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE);

        // sometimes add a valid URL with it
        if (randomBoolean()) {
            if (randomBoolean()) {
                builder.putArray("xpack.monitoring.exporters._http.host", "localhost:9200", invalidHost);
            } else {
                builder.putArray("xpack.monitoring.exporters._http.host", invalidHost, "localhost:9200");
            }
        } else {
            ClusterStateMonitoringDoc doc = new ClusterStateMonitoringDoc(MonitoredSystem.ES.getSystem(), Version.CURRENT.toString());
            doc.setClusterUUID(internalCluster().getClusterName());
            doc.setTimestamp(System.currentTimeMillis());
            doc.setSourceNode(new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
            doc.setClusterState(ClusterState.PROTO);
            doc.setStatus(ClusterHealthStatus.GREEN);
            return doc;
            builder.put("xpack.monitoring.exporters._http.host", invalidHost);
        }

        final Config config = createConfig(builder.build());

        final SettingsException exception = expectThrows(SettingsException.class, () -> new HttpExporter(config, sslService));

        assertThat(exception.getMessage(), equalTo("[xpack.monitoring.exporters._http.host] invalid host: [" + invalidHost + "]"));
    }

    public void testExporterWithHostOnly() throws Exception {
        final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
        when(sslService.sslIOSessionStrategy(any(Settings.class))).thenReturn(sslStrategy);

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", "http://localhost:9200");

        final Config config = createConfig(builder.build());

        new HttpExporter(config, sslService).close();
    }

    public void testCreateRestClient() throws IOException {
        final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);

        when(sslService.sslIOSessionStrategy(any(Settings.class))).thenReturn(sslStrategy);

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.host", "http://localhost:9200");

        // use basic auth
        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.auth.username", "_user")
                   .put("xpack.monitoring.exporters._http.auth.password", "_pass");
        }

        // use headers
        if (randomBoolean()) {
            builder.put("xpack.monitoring.exporters._http.headers.abc", "xyz");
        }

        final Config config = createConfig(builder.build());
        final NodeFailureListener listener = mock(NodeFailureListener.class);

        // doesn't explode
        HttpExporter.createRestClient(config, sslService, listener).close();
    }

    public void testCreateSnifferDisabledByDefault() {
        final Config config = createConfig(Settings.EMPTY);
        final RestClient client = mock(RestClient.class);
        final NodeFailureListener listener = mock(NodeFailureListener.class);

        assertThat(HttpExporter.createSniffer(config, client, listener), nullValue());

        verifyZeroInteractions(client, listener);
    }

    public void testCreateSnifferWithoutHosts() {
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http")
                .put("xpack.monitoring.exporters._http.sniff.enabled", true);

        final Config config = createConfig(builder.build());
        final RestClient client = mock(RestClient.class);
        final NodeFailureListener listener = mock(NodeFailureListener.class);

        expectThrows(IndexOutOfBoundsException.class, () -> HttpExporter.createSniffer(config, client, listener));
    }

    public void testCreateSniffer() throws IOException {
        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http")
                // it's a simple check: does it start with "https"?
                .put("xpack.monitoring.exporters._http.host", randomFrom("neither", "http", "https"))
                .put("xpack.monitoring.exporters._http.sniff.enabled", true);

        final Config config = createConfig(builder.build());
        final RestClient client = mock(RestClient.class);
        final NodeFailureListener listener = mock(NodeFailureListener.class);
        final Response response = mock(Response.class);
        final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON);

        when(response.getEntity()).thenReturn(entity);
        when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response);

        try (final Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) {
            assertThat(sniffer, not(nullValue()));

            verify(listener).setSniffer(sniffer);
        }

        // it's a race whether it triggers this at all
        verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class));

        verifyNoMoreInteractions(client, listener);
    }
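    // atMost(1) rather than times(1) is deliberate: the sniffer schedules its first /_nodes/http
    // round on a background thread, so whether it fires before close() is a race the test has to
    // tolerate in either direction (see the inline comment above).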

    public void testCreateResources() {
        final boolean useIngest = randomBoolean();
        final TimeValue templateTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);
        final TimeValue pipelineTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http");

        if (useIngest == false) {
            builder.put("xpack.monitoring.exporters._http.use_ingest", false);
        }

        if (templateTimeout != null) {
            builder.put("xpack.monitoring.exporters._http.index.template.master_timeout", templateTimeout.toString());
        }

        // note: this shouldn't get used with useIngest == false, but it doesn't hurt to try to cause issues
        if (pipelineTimeout != null) {
            builder.put("xpack.monitoring.exporters._http.index.pipeline.master_timeout", pipelineTimeout.toString());
        }

        final Config config = createConfig(builder.build());

        final MultiHttpResource multiResource = HttpExporter.createResources(config, new ResolversRegistry(config.settings()));

        final List<HttpResource> resources = multiResource.getResources();
        final int version = (int) resources.stream().filter((resource) -> resource instanceof VersionHttpResource).count();
        final List<TemplateHttpResource> templates =
                resources.stream().filter((resource) -> resource instanceof TemplateHttpResource)
                         .map(TemplateHttpResource.class::cast)
                         .collect(Collectors.toList());
        final List<PipelineHttpResource> pipelines =
                resources.stream().filter((resource) -> resource instanceof PipelineHttpResource)
                         .map(PipelineHttpResource.class::cast)
                         .collect(Collectors.toList());

        // expected number of resources
        assertThat(multiResource.getResources().size(), equalTo(version + templates.size() + pipelines.size()));
        assertThat(version, equalTo(1));
        assertThat(templates, hasSize(3));
        assertThat(pipelines, hasSize(useIngest ? 1 : 0));

        // timeouts
        assertMasterTimeoutSet(templates, templateTimeout);
        assertMasterTimeoutSet(pipelines, pipelineTimeout);

        // logging owner names
        final List<String> uniqueOwners =
                resources.stream().map(HttpResource::getResourceOwnerName).distinct().collect(Collectors.toList());

        assertThat(uniqueOwners, hasSize(1));
        assertThat(uniqueOwners.get(0), equalTo("xpack.monitoring.exporters._http"));
    }

    public void testCreateDefaultParams() {
        final TimeValue bulkTimeout = randomFrom(TimeValue.timeValueSeconds(30), null);
        final boolean useIngest = randomBoolean();

        final Settings.Builder builder = Settings.builder()
                .put("xpack.monitoring.exporters._http.type", "http");

        if (bulkTimeout != null) {
            builder.put("xpack.monitoring.exporters._http.bulk.timeout", bulkTimeout.toString());
        }

        if (useIngest == false) {
            builder.put("xpack.monitoring.exporters._http.use_ingest", false);
        }

        final Config config = createConfig(builder.build());

        final Map<String, String> parameters = new HashMap<>(HttpExporter.createDefaultParams(config));

        assertThat(parameters.remove("filter_path"), equalTo("errors,items.*.error"));

        if (bulkTimeout != null) {
            assertThat(parameters.remove("master_timeout"), equalTo(bulkTimeout.toString()));
        }

        if (useIngest) {
            assertThat(parameters.remove("pipeline"), equalTo(Exporter.EXPORT_PIPELINE_NAME));
        }

        // should have removed everything
        assertThat(parameters.size(), equalTo(0));
    }
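    // The remove-then-assert-empty idiom checks the parameter map in both directions: each
    // expected key is present with the right value, and once the known keys are pulled out
    // nothing unexpected remains.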
|
||||
|
||||
public void testHttpExporterDirtyResourcesBlock() throws Exception {
|
||||
final Config config = createConfig(Settings.EMPTY);
|
||||
final RestClient client = mock(RestClient.class);
|
||||
final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
|
||||
final NodeFailureListener listener = mock(NodeFailureListener.class);
|
||||
final ResolversRegistry resolvers = mock(ResolversRegistry.class);
|
||||
final HttpResource resource = new MockHttpResource(exporterName(), true, PublishableHttpResource.CheckResponse.ERROR, false);
|
||||
|
||||
try (final HttpExporter exporter = new HttpExporter(config, client, sniffer, listener, resolvers, resource)) {
|
||||
verify(listener).setResource(resource);
|
||||
|
||||
assertThat(exporter.openBulk(), nullValue());
|
||||
}
|
||||
}
|
||||
|
||||
    public void testHttpExporter() throws Exception {
        final Config config = createConfig(Settings.EMPTY);
        final RestClient client = mock(RestClient.class);
        final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
        final NodeFailureListener listener = mock(NodeFailureListener.class);
        final ResolversRegistry resolvers = mock(ResolversRegistry.class);
        // sometimes dirty to start with and sometimes not; but always succeeds on checkAndPublish
        final HttpResource resource = new MockHttpResource(exporterName(), randomBoolean());

        try (final HttpExporter exporter = new HttpExporter(config, client, sniffer, listener, resolvers, resource)) {
            verify(listener).setResource(resource);

            final HttpExportBulk bulk = exporter.openBulk();

            assertThat(bulk.getName(), equalTo(exporterName()));
        }
    }

    public void testHttpExporterShutdown() throws Exception {
        final Config config = createConfig(Settings.EMPTY);
        final RestClient client = mock(RestClient.class);
        final Sniffer sniffer = randomFrom(mock(Sniffer.class), null);
        final NodeFailureListener listener = mock(NodeFailureListener.class);
        final ResolversRegistry resolvers = mock(ResolversRegistry.class);
        final MultiHttpResource resource = mock(MultiHttpResource.class);

        if (sniffer != null && rarely()) {
            doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(sniffer).close();
        }

        if (rarely()) {
            doThrow(randomFrom(new IOException("expected"), new RuntimeException("expected"))).when(client).close();
        }

        new HttpExporter(config, client, sniffer, listener, resolvers, resource).close();

        // order matters; sniffer must close first
        if (sniffer != null) {
            final InOrder inOrder = inOrder(sniffer, client);

            inOrder.verify(sniffer).close();
            inOrder.verify(client).close();
        } else {
            verify(client).close();
        }
    }

    private List<MonitoringDoc> newRandomMonitoringDocs(int nb) {
        List<MonitoringDoc> docs = new ArrayList<>(nb);
        for (int i = 0; i < nb; i++) {
            docs.add(newRandomMonitoringDoc());
        }
        return docs;
    }

    private void enqueueGetClusterVersionResponse(Version v) throws IOException {
        enqueueGetClusterVersionResponse(webServer, v);
    }

    private void enqueueGetClusterVersionResponse(MockWebServer mockWebServer, Version v) throws IOException {
        mockWebServer.enqueue(new MockResponse().setResponseCode(200).setBody(
                jsonBuilder().startObject().startObject("version").field("number", v.toString()).endObject().endObject().bytes()
                        .utf8ToString()));
    }

    private void enqueueTemplateAndPipelineResponses(final MockWebServer webServer,
                                                     final boolean templatesAlreadyExists, final boolean pipelineAlreadyExists)
            throws IOException {
        enqueueTemplateResponses(webServer, templatesAlreadyExists);
        enqueuePipelineResponses(webServer, pipelineAlreadyExists);
    }

    private void enqueueTemplateResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
        if (alreadyExists) {
            enqueueTemplateResponsesExistsAlready(webServer);
        } else {
            enqueueTemplateResponsesDoesNotExistYet(webServer);
        }
    }

    private void enqueueTemplateResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
        for (String template : monitoringTemplates().keySet()) {
            enqueueResponse(webServer, 404, "template [" + template + "] does not exist");
            enqueueResponse(webServer, 201, "template [" + template + "] created");
        }
    }

    private void enqueueTemplateResponsesExistsAlready(final MockWebServer webServer) throws IOException {
        for (String template : monitoringTemplates().keySet()) {
            enqueueResponse(webServer, 200, "template [" + template + "] exists");
        }
    }

    private void enqueuePipelineResponses(final MockWebServer webServer, final boolean alreadyExists) throws IOException {
        if (alreadyExists) {
            enqueuePipelineResponsesExistsAlready(webServer);
        } else {
            enqueuePipelineResponsesDoesNotExistYet(webServer);
        }
    }

    private void enqueuePipelineResponsesDoesNotExistYet(final MockWebServer webServer) throws IOException {
        enqueueResponse(webServer, 404, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] does not exist");
        enqueueResponse(webServer, 201, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] created");
    }

    private void enqueuePipelineResponsesExistsAlready(final MockWebServer webServer) throws IOException {
        enqueueResponse(webServer, 200, "pipeline [" + Exporter.EXPORT_PIPELINE_NAME + "] exists");
    }

    private void enqueueResponse(int responseCode, String body) throws IOException {
        enqueueResponse(webServer, responseCode, body);
    }

    private void enqueueResponse(MockWebServer mockWebServer, int responseCode, String body) throws IOException {
        mockWebServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body));
    }

    private void assertMasterTimeoutSet(final List<? extends PublishableHttpResource> resources, final TimeValue timeout) {
        if (timeout != null) {
            for (final PublishableHttpResource resource : resources) {
                assertThat(resource.getParameters().get("master_timeout"), equalTo(timeout.toString()));
            }
        }
    }

    /**
     * Create the {@link Config} named "_http" and select those settings from {@code settings}.
     *
     * @param settings The settings to select the exporter's settings from
     * @return Never {@code null}.
     */
    private static Config createConfig(Settings settings) {
        return new Config("_http", HttpExporter.TYPE, settings.getAsSettings(exporterName()));
    }

    private static String exporterName() {
        return "xpack.monitoring.exporters._http";
    }

    private void assertBulkRequest(Buffer requestBody, int numberOfActions) throws Exception {
        BulkRequest bulkRequest = Requests.bulkRequest().add(new BytesArray(requestBody.readByteArray()), null, null);
        assertThat(bulkRequest.numberOfActions(), equalTo(numberOfActions));
        for (ActionRequest actionRequest : bulkRequest.requests()) {
            assertThat(actionRequest, instanceOf(IndexRequest.class));
        }
    }
}
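For orientation, a minimal sketch of the exporter settings these tests feed through createConfig(...); the host and timeout values are illustrative assumptions, not taken from the tests above:

// Sketch only: mirrors createConfig(...) above; "localhost:9200" and "30s" are example values.
final Settings settings = Settings.builder()
        .put("xpack.monitoring.exporters._http.type", "http")
        .put("xpack.monitoring.exporters._http.host", "localhost:9200")  // assumed example host
        .put("xpack.monitoring.exporters._http.bulk.timeout", "30s")     // assumed example timeout
        .build();
final Config config = new Config("_http", HttpExporter.TYPE,
        settings.getAsSettings("xpack.monitoring.exporters._http"));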
|
|
|
@ -1,75 +0,0 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.test.ESTestCase;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;

import static org.hamcrest.CoreMatchers.equalTo;


public class HttpExporterUtilsTests extends ESTestCase {

    public void testHostParsing() throws MalformedURLException, URISyntaxException {
        URL url = HttpExporterUtils.parseHostWithPath("localhost:9200", "");
        verifyUrl(url, "http", "localhost", 9200, "/");

        url = HttpExporterUtils.parseHostWithPath("localhost", "_bulk");
        verifyUrl(url, "http", "localhost", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("http://localhost:9200", "_bulk");
        verifyUrl(url, "http", "localhost", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("http://localhost", "_bulk");
        verifyUrl(url, "http", "localhost", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("https://localhost:9200", "_bulk");
        verifyUrl(url, "https", "localhost", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("https://boaz-air.local:9200", "_bulk");
        verifyUrl(url, "https", "boaz-air.local", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("localhost:9200/suburl", "");
        verifyUrl(url, "http", "localhost", 9200, "/suburl/");

        url = HttpExporterUtils.parseHostWithPath("localhost/suburl", "_bulk");
        verifyUrl(url, "http", "localhost", 9200, "/suburl/_bulk");

        url = HttpExporterUtils.parseHostWithPath("http://localhost:9200/suburl/suburl1", "_bulk");
        verifyUrl(url, "http", "localhost", 9200, "/suburl/suburl1/_bulk");

        url = HttpExporterUtils.parseHostWithPath("https://localhost:9200/suburl", "_bulk");
        verifyUrl(url, "https", "localhost", 9200, "/suburl/_bulk");

        url = HttpExporterUtils.parseHostWithPath("https://server_with_underscore:9300", "_bulk");
        verifyUrl(url, "https", "server_with_underscore", 9300, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("server_with_underscore:9300", "_bulk");
        verifyUrl(url, "http", "server_with_underscore", 9300, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("server_with_underscore", "_bulk");
        verifyUrl(url, "http", "server_with_underscore", 9200, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("https://server-dash:9300", "_bulk");
        verifyUrl(url, "https", "server-dash", 9300, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("server-dash:9300", "_bulk");
        verifyUrl(url, "http", "server-dash", 9300, "/_bulk");

        url = HttpExporterUtils.parseHostWithPath("server-dash", "_bulk");
        verifyUrl(url, "http", "server-dash", 9200, "/_bulk");
    }

    void verifyUrl(URL url, String protocol, String host, int port, String path) throws URISyntaxException {
        assertThat(url.getProtocol(), equalTo(protocol));
        assertThat(url.getHost(), equalTo(host));
        assertThat(url.getPort(), equalTo(port));
        assertThat(url.toURI().getPath(), equalTo(path));
    }
}
|
|
@ -0,0 +1,171 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

/**
 * Tests {@link HttpHostBuilder}.
 */
public class HttpHostBuilderTests extends ESTestCase {

    private final Scheme scheme = randomFrom(Scheme.values());
    private final String hostname = randomAsciiOfLengthBetween(1, 20);
    private final int port = randomIntBetween(1, 65535);

    public void testBuilder() {
        assertHttpHost(HttpHostBuilder.builder(hostname), Scheme.HTTP, hostname, 9200);
        assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname), scheme, hostname, 9200);
        assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname + ":" + port), scheme, hostname, port);
        // weird port, but I don't expect it to explode
        assertHttpHost(HttpHostBuilder.builder(scheme.toString() + "://" + hostname + ":-1"), scheme, hostname, 9200);
        // port without scheme
        assertHttpHost(HttpHostBuilder.builder(hostname + ":" + port), Scheme.HTTP, hostname, port);

        // fairly ordinary
        assertHttpHost(HttpHostBuilder.builder("localhost"), Scheme.HTTP, "localhost", 9200);
        assertHttpHost(HttpHostBuilder.builder("localhost:9200"), Scheme.HTTP, "localhost", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://localhost"), Scheme.HTTP, "localhost", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://localhost:9200"), Scheme.HTTP, "localhost", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://localhost:9200"), Scheme.HTTPS, "localhost", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://boaz-air.local:9200"), Scheme.HTTPS, "boaz-air.local", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://server-dash:19200"), Scheme.HTTPS, "server-dash", 19200);
        assertHttpHost(HttpHostBuilder.builder("server-dash:19200"), Scheme.HTTP, "server-dash", 19200);
        assertHttpHost(HttpHostBuilder.builder("server-dash"), Scheme.HTTP, "server-dash", 9200);
        assertHttpHost(HttpHostBuilder.builder("sub.domain"), Scheme.HTTP, "sub.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://sub.domain"), Scheme.HTTP, "sub.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://sub.domain:9200"), Scheme.HTTP, "sub.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://sub.domain:9200"), Scheme.HTTPS, "sub.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://sub.domain:19200"), Scheme.HTTPS, "sub.domain", 19200);

        // ipv4
        assertHttpHost(HttpHostBuilder.builder("127.0.0.1"), Scheme.HTTP, "127.0.0.1", 9200);
        assertHttpHost(HttpHostBuilder.builder("127.0.0.1:19200"), Scheme.HTTP, "127.0.0.1", 19200);
        assertHttpHost(HttpHostBuilder.builder("http://127.0.0.1"), Scheme.HTTP, "127.0.0.1", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://127.0.0.1:9200"), Scheme.HTTP, "127.0.0.1", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://127.0.0.1:9200"), Scheme.HTTPS, "127.0.0.1", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://127.0.0.1:19200"), Scheme.HTTPS, "127.0.0.1", 19200);

        // ipv6
        assertHttpHost(HttpHostBuilder.builder("[::1]"), Scheme.HTTP, "[::1]", 9200);
        assertHttpHost(HttpHostBuilder.builder("[::1]:19200"), Scheme.HTTP, "[::1]", 19200);
        assertHttpHost(HttpHostBuilder.builder("http://[::1]"), Scheme.HTTP, "[::1]", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://[::1]:9200"), Scheme.HTTP, "[::1]", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://[::1]:9200"), Scheme.HTTPS, "[::1]", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://[::1]:19200"), Scheme.HTTPS, "[::1]", 19200);
        assertHttpHost(HttpHostBuilder.builder("[fdda:5cc1:23:4::1f]"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://[fdda:5cc1:23:4::1f]"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://[fdda:5cc1:23:4::1f]:9200"), Scheme.HTTP, "[fdda:5cc1:23:4::1f]", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://[fdda:5cc1:23:4::1f]:9200"), Scheme.HTTPS, "[fdda:5cc1:23:4::1f]", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://[fdda:5cc1:23:4::1f]:19200"), Scheme.HTTPS, "[fdda:5cc1:23:4::1f]", 19200);

        // underscores
        assertHttpHost(HttpHostBuilder.builder("server_with_underscore"), Scheme.HTTP, "server_with_underscore", 9200);
        assertHttpHost(HttpHostBuilder.builder("server_with_underscore:19200"), Scheme.HTTP, "server_with_underscore", 19200);
        assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore"), Scheme.HTTP, "server_with_underscore", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore:9200"), Scheme.HTTP, "server_with_underscore", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://server_with_underscore:19200"), Scheme.HTTP, "server_with_underscore", 19200);
        assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore"), Scheme.HTTPS, "server_with_underscore", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore:9200"), Scheme.HTTPS, "server_with_underscore", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://server_with_underscore:19200"), Scheme.HTTPS, "server_with_underscore", 19200);
        assertHttpHost(HttpHostBuilder.builder("_prefix.domain"), Scheme.HTTP, "_prefix.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("_prefix.domain:19200"), Scheme.HTTP, "_prefix.domain", 19200);
        assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain"), Scheme.HTTP, "_prefix.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain:9200"), Scheme.HTTP, "_prefix.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("http://_prefix.domain:19200"), Scheme.HTTP, "_prefix.domain", 19200);
        assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain"), Scheme.HTTPS, "_prefix.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain:9200"), Scheme.HTTPS, "_prefix.domain", 9200);
        assertHttpHost(HttpHostBuilder.builder("https://_prefix.domain:19200"), Scheme.HTTPS, "_prefix.domain", 19200);
    }

    public void testManualBuilder() {
        assertHttpHost(HttpHostBuilder.builder().host(hostname), Scheme.HTTP, hostname, 9200);
        assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname), scheme, hostname, 9200);
        assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname).port(port), scheme, hostname, port);
        // unset the port (not normal, but ensuring it works)
        assertHttpHost(HttpHostBuilder.builder().scheme(scheme).host(hostname).port(port).port(-1), scheme, hostname, 9200);
        // port without scheme
        assertHttpHost(HttpHostBuilder.builder().host(hostname).port(port), Scheme.HTTP, hostname, port);
    }

    public void testBuilderNullUri() {
        final NullPointerException e = expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder(null));

        assertThat(e.getMessage(), equalTo("uri must not be null"));
    }

    public void testUnknownScheme() {
        assertBuilderBadSchemeThrows("htp://localhost:9200", "htp");
        assertBuilderBadSchemeThrows("htttp://localhost:9200", "htttp");
        assertBuilderBadSchemeThrows("httpd://localhost:9200", "httpd");
        assertBuilderBadSchemeThrows("ws://localhost:9200", "ws");
        assertBuilderBadSchemeThrows("wss://localhost:9200", "wss");
        assertBuilderBadSchemeThrows("ftp://localhost:9200", "ftp");
        assertBuilderBadSchemeThrows("gopher://localhost:9200", "gopher");
        assertBuilderBadSchemeThrows("localhost://9200", "localhost");
    }

    public void testPathIsBlocked() {
        assertBuilderPathThrows("http://localhost:9200/", "/");
        assertBuilderPathThrows("http://localhost:9200/sub", "/sub");
        assertBuilderPathThrows("http://localhost:9200/sub/path", "/sub/path");
    }

    public void testBuildWithoutHost() {
        final IllegalStateException e = expectThrows(IllegalStateException.class, () -> HttpHostBuilder.builder().build());

        assertThat(e.getMessage(), equalTo("host must be set"));
    }

    public void testNullScheme() {
        expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder().scheme(null));
    }

    public void testNullHost() {
        expectThrows(NullPointerException.class, () -> HttpHostBuilder.builder().host(null));
    }

    public void testBadPort() {
        assertPortThrows(0);
        assertPortThrows(65536);

        assertPortThrows(randomIntBetween(Integer.MIN_VALUE, -2));
        assertPortThrows(randomIntBetween(65537, Integer.MAX_VALUE));
    }

    private void assertHttpHost(final HttpHostBuilder host, final Scheme scheme, final String hostname, final int port) {
        assertHttpHost(host.build(), scheme, hostname, port);
    }

    private void assertHttpHost(final HttpHost host, final Scheme scheme, final String hostname, final int port) {
        assertThat(host.getSchemeName(), equalTo(scheme.toString()));
        assertThat(host.getHostName(), equalTo(hostname));
        assertThat(host.getPort(), equalTo(port));
    }

    private void assertBuilderPathThrows(final String uri, final String path) {
        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder(uri));

        assertThat(e.getMessage(), containsString("[" + path + "]"));
    }

    private void assertBuilderBadSchemeThrows(final String uri, final String scheme) {
        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder(uri));

        assertThat(e.getMessage(), containsString(scheme));
    }

    private void assertPortThrows(final int port) {
        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> HttpHostBuilder.builder().port(port));

        assertThat(e.getMessage(), containsString(Integer.toString(port)));
    }

}
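As a usage sketch, the contract these assertions pin down (scheme defaults to http, port defaults to 9200) looks like this in caller code; the host string is an illustrative example, not part of the test file:

// Parse a host string and build the Apache HttpHost handed to the RestClient.
HttpHost host = HttpHostBuilder.builder("https://example-host:19200").build();
// host.getSchemeName() == "https", host.getHostName() == "example-host", host.getPort() == 19200

// Defaults exercised by testBuilder(): omitted scheme -> http, omitted port -> 9200.
HttpHost defaulted = HttpHostBuilder.builder("example-host").build();
// defaulted.getSchemeName() == "http", defaulted.getPort() == 9200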
|
|
@ -0,0 +1,129 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;

import java.util.function.Supplier;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
 * Tests {@link HttpResource}.
 */
public class HttpResourceTests extends ESTestCase {

    private final String owner = getTestName();
    private final RestClient client = mock(RestClient.class);

    public void testConstructorRequiresOwner() {
        expectThrows(NullPointerException.class, () -> new HttpResource(null) {
            @Override
            protected boolean doCheckAndPublish(RestClient client) {
                return false;
            }
        });
    }

    public void testConstructor() {
        final HttpResource resource = new HttpResource(owner) {
            @Override
            protected boolean doCheckAndPublish(RestClient client) {
                return false;
            }
        };

        assertSame(owner, resource.resourceOwnerName);
        assertTrue(resource.isDirty());
    }

    public void testConstructorDirtiness() {
        final boolean dirty = randomBoolean();
        final HttpResource resource = new HttpResource(owner, dirty) {
            @Override
            protected boolean doCheckAndPublish(RestClient client) {
                return false;
            }
        };

        assertSame(owner, resource.resourceOwnerName);
        assertEquals(dirty, resource.isDirty());
    }

    public void testDirtiness() {
        // MockHttpResource always succeeds for checkAndPublish
        final HttpResource resource = new MockHttpResource(owner);

        assertTrue(resource.isDirty());

        resource.markDirty();

        assertTrue(resource.isDirty());

        // if this fails, then the mocked resource needs to be fixed
        assertTrue(resource.checkAndPublish(client));

        assertFalse(resource.isDirty());
    }

    public void testCheckAndPublish() {
        final boolean expected = randomBoolean();
        // the default dirtiness should be irrelevant; it should always be run!
        final HttpResource resource = new HttpResource(owner) {
            @Override
            protected boolean doCheckAndPublish(final RestClient client) {
                return expected;
            }
        };

        assertEquals(expected, resource.checkAndPublish(client));
    }

    public void testCheckAndPublishEvenWhenDirty() {
        @SuppressWarnings("unchecked")
        final Supplier<Boolean> supplier = mock(Supplier.class);
        when(supplier.get()).thenReturn(true, false);

        final HttpResource resource = new HttpResource(owner) {
            @Override
            protected boolean doCheckAndPublish(final RestClient client) {
                return supplier.get();
            }
        };

        assertTrue(resource.isDirty());
        assertTrue(resource.checkAndPublish(client));
        assertFalse(resource.isDirty());
        assertFalse(resource.checkAndPublish(client));

        verify(supplier, times(2)).get();
    }

    public void testCheckAndPublishIfDirty() {
        @SuppressWarnings("unchecked")
        final Supplier<Boolean> supplier = mock(Supplier.class);
        when(supplier.get()).thenReturn(true, false);

        final HttpResource resource = new HttpResource(owner) {
            @Override
            protected boolean doCheckAndPublish(final RestClient client) {
                return supplier.get();
            }
        };

        assertTrue(resource.isDirty());
        assertTrue(resource.checkAndPublishIfDirty(client));
        assertFalse(resource.isDirty());
        assertTrue(resource.checkAndPublishIfDirty(client));

        // once is the default!
        verify(supplier).get();
    }

}
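A short sketch of the dirty-flag lifecycle these tests verify, using the MockHttpResource helper from this package; client is a RestClient as in the tests, and this is illustrative rather than part of the file:

HttpResource resource = new MockHttpResource("example-owner"); // starts dirty; checkAndPublish always succeeds
// resource.isDirty() == true            -- dirty until successfully published
resource.checkAndPublish(client);        // a successful check/publish clears the flag
// resource.isDirty() == false
resource.markDirty();                    // e.g., after a node failure
// resource.isDirty() == true again; checkAndPublishIfDirty(client) would now re-publish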
|
|
@ -0,0 +1,118 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Collections;
import java.util.Map;

/**
 * {@code MockHttpResource} allows the {@linkplain HttpResource#isDirty() dirtiness} to be defaulted.
 */
public class MockHttpResource extends PublishableHttpResource {

    public final CheckResponse check;
    public final boolean publish;

    public int checked = 0;
    public int published = 0;

    /**
     * Create a new {@link MockHttpResource} that starts dirty, but always succeeds.
     *
     * @param resourceOwnerName The user-recognizable name
     */
    public MockHttpResource(final String resourceOwnerName) {
        this(resourceOwnerName, true, CheckResponse.EXISTS, true);
    }

    /**
     * Create a new {@link MockHttpResource} that starts {@code dirty}, but always succeeds.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param dirty The starting dirtiness of the resource.
     */
    public MockHttpResource(final String resourceOwnerName, final boolean dirty) {
        this(resourceOwnerName, dirty, CheckResponse.EXISTS, true);
    }

    /**
     * Create a new {@link MockHttpResource} that starts dirty, but always succeeds.
     *
     * @param resourceOwnerName The user-recognizable name.
     * @param masterTimeout Master timeout to use with any request.
     * @param parameters The base parameters to specify for the request.
     */
    public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters) {
        this(resourceOwnerName, masterTimeout, parameters, true, CheckResponse.EXISTS, true);
    }

    /**
     * Create a new {@link MockHttpResource} that starts {@code dirty}.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param dirty The starting dirtiness of the resource.
     * @param check The expected response when checking for the resource.
     * @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
     */
    public MockHttpResource(final String resourceOwnerName, final boolean dirty, final CheckResponse check, final boolean publish) {
        this(resourceOwnerName, null, Collections.emptyMap(), dirty, check, publish);
    }

    /**
     * Create a new {@link MockHttpResource} that starts dirty.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param check The expected response when checking for the resource.
     * @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
     * @param masterTimeout Master timeout to use with any request.
     * @param parameters The base parameters to specify for the request.
     */
    public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters,
                            final CheckResponse check, final boolean publish) {
        this(resourceOwnerName, masterTimeout, parameters, true, check, publish);
    }

    /**
     * Create a new {@link MockHttpResource}.
     *
     * @param resourceOwnerName The user-recognizable name
     * @param dirty The starting dirtiness of the resource.
     * @param check The expected response when checking for the resource.
     * @param publish The expected response when publishing the resource (assumes check was {@link CheckResponse#DOES_NOT_EXIST}).
     * @param masterTimeout Master timeout to use with any request.
     * @param parameters The base parameters to specify for the request.
     */
    public MockHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout, final Map<String, String> parameters,
                            final boolean dirty, final CheckResponse check, final boolean publish) {
        super(resourceOwnerName, masterTimeout, parameters, dirty);

        this.check = check;
        this.publish = publish;
    }

    @Override
    protected CheckResponse doCheck(final RestClient client) {
        assert client != null;

        ++checked;

        return check;
    }

    @Override
    protected boolean doPublish(final RestClient client) {
        assert client != null;

        ++published;

        return publish;
    }

}
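For illustration, how the tests use this mock to simulate the create-on-first-use path; a sketch only, with an assumed owner name:

// Check reports DOES_NOT_EXIST, so checkAndPublish falls through to doPublish, which succeeds.
MockHttpResource resource = new MockHttpResource("example-owner", true, CheckResponse.DOES_NOT_EXIST, true);
resource.checkAndPublish(client);  // returns true
// afterwards: resource.checked == 1 and resource.published == 1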
|
|
@ -0,0 +1,128 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import com.squareup.okhttp.mockwebserver.MockWebServer;
import com.squareup.okhttp.mockwebserver.QueueDispatcher;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;
import java.net.BindException;
import java.util.ArrayList;
import java.util.List;

/**
 * {@code MockWebServerContainer} wraps a {@link MockWebServer} to avoid forcing every usage of it to do the same thing.
 */
public class MockWebServerContainer implements AutoCloseable {

    private static Logger logger = Loggers.getLogger(MockWebServerContainer.class);

    /**
     * The running {@link MockWebServer}.
     */
    private final MockWebServer server;

    /**
     * Create a {@link MockWebServerContainer} that uses a port from [{@code 9250}, {@code 9300}).
     *
     * @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
     */
    public MockWebServerContainer() {
        this(9250, 9300);
    }

    /**
     * Create a {@link MockWebServerContainer} that uses a port from [{@code startPort}, {@code 9300}).
     * <p>
     * This is useful if you need to test with two {@link MockWebServer}s, so you can simply skip the port of the existing one.
     *
     * @param startPort The first port to try (inclusive).
     * @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
     */
    public MockWebServerContainer(final int startPort) {
        this(startPort, 9300);
    }

    /**
     * Create a {@link MockWebServerContainer} that uses a port from [{@code startPort}, {@code endPort}).
     *
     * @param startPort The first port to try (inclusive).
     * @param endPort The last port to try (exclusive).
     * @throws RuntimeException if an unrecoverable exception occurs (e.g., no open ports available)
     */
    public MockWebServerContainer(final int startPort, final int endPort) {
        final List<Integer> failedPorts = new ArrayList<>(0);
        final QueueDispatcher dispatcher = new QueueDispatcher();
        dispatcher.setFailFast(true);

        MockWebServer webServer = null;

        for (int port = startPort; port < endPort; ++port) {
            try {
                webServer = new MockWebServer();
                webServer.setDispatcher(dispatcher);

                webServer.start(port);
                break;
            } catch (final BindException e) {
                failedPorts.add(port);
                webServer = null;
            } catch (final IOException e) {
                logger.error("unrecoverable failure while trying to start MockWebServer with port [{}]", e, port);
                throw new ElasticsearchException(e);
            }
        }

        if (webServer != null) {
            this.server = webServer;

            if (failedPorts.isEmpty() == false) {
                logger.warn("ports [{}] were already in use. using port [{}]", failedPorts, webServer.getPort());
            }
        } else {
            throw new ElasticsearchException("unable to find open port between [" + startPort + "] and [" + endPort + "]");
        }
    }

    /**
     * Get the {@link MockWebServer} created by this container.
     *
     * @return Never {@code null}.
     */
    public MockWebServer getWebServer() {
        return server;
    }

    /**
     * Get the port used by the running web server.
     *
     * @return The local port used by the {@linkplain #getWebServer() web server}.
     */
    public int getPort() {
        return server.getPort();
    }

    /**
     * Get the formatted address in the form of "hostname:port".
     *
     * @return Never {@code null}.
     */
    public String getFormattedAddress() {
        return server.getHostName() + ":" + server.getPort();
    }

    /**
     * Shutdown the {@linkplain #getWebServer() web server}.
     */
    @Override
    public void close() throws Exception {
        server.shutdown();
    }

}
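A usage sketch: since the container is AutoCloseable, tests can lease a port in the fixed range and rely on automatic shutdown. The enqueued body below is an illustrative placeholder:

try (MockWebServerContainer container = new MockWebServerContainer()) { // tries ports [9250, 9300)
    MockWebServer webServer = container.getWebServer();
    webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{}"));
    // point an exporter at "http://" + container.getFormattedAddress() ...
}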
|
|
@ -0,0 +1,99 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;

/**
 * Tests {@link MultiHttpResource}.
 */
public class MultiHttpResourceTests extends ESTestCase {

    private final String owner = getClass().getSimpleName();
    private final RestClient client = mock(RestClient.class);

    public void testDoCheckAndPublish() {
        final List<MockHttpResource> allResources = successfulResources();
        final MultiHttpResource multiResource = new MultiHttpResource(owner, allResources);

        assertTrue(multiResource.doCheckAndPublish(client));

        for (final MockHttpResource resource : allResources) {
            assertSuccessfulResource(resource);
        }
    }

    public void testDoCheckAndPublishShortCircuits() {
        // fail either the check or the publish
        final CheckResponse check = randomFrom(CheckResponse.ERROR, CheckResponse.DOES_NOT_EXIST);
        final boolean publish = check == CheckResponse.ERROR;
        final List<MockHttpResource> allResources = successfulResources();
        final MockHttpResource failureResource = new MockHttpResource(owner, true, check, publish);

        allResources.add(failureResource);

        Collections.shuffle(allResources, random());

        final MultiHttpResource multiResource = new MultiHttpResource(owner, allResources);

        assertFalse(multiResource.doCheckAndPublish(client));

        boolean found = false;

        for (final MockHttpResource resource : allResources) {
            // should stop looking at this point
            if (resource == failureResource) {
                assertThat(resource.checked, equalTo(1));
                if (resource.check == CheckResponse.ERROR) {
                    assertThat(resource.published, equalTo(0));
                } else {
                    assertThat(resource.published, equalTo(1));
                }

                found = true;
            } else if (found) {
                assertThat(resource.checked, equalTo(0));
                assertThat(resource.published, equalTo(0));
            } else {
                assertSuccessfulResource(resource);
            }
        }
    }

    private List<MockHttpResource> successfulResources() {
        final int successful = randomIntBetween(2, 5);
        final List<MockHttpResource> resources = new ArrayList<>(successful);

        for (int i = 0; i < successful; ++i) {
            final CheckResponse check = randomFrom(CheckResponse.DOES_NOT_EXIST, CheckResponse.EXISTS);
            final MockHttpResource resource = new MockHttpResource(owner, randomBoolean(), check, check == CheckResponse.DOES_NOT_EXIST);

            resources.add(resource);
        }

        return resources;
    }

    private void assertSuccessfulResource(final MockHttpResource resource) {
        assertThat(resource.checked, equalTo(1));
        if (resource.check == CheckResponse.DOES_NOT_EXIST) {
            assertThat(resource.published, equalTo(1));
        } else {
            assertThat(resource.published, equalTo(0));
        }
    }

}
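A sketch of the fail-fast contract asserted above: resources are visited in order and the first failed check/publish stops the chain, so later resources are never touched. Here templateResource and pipelineResource are hypothetical stand-ins:

MultiHttpResource multi = new MultiHttpResource("example-owner", Arrays.asList(templateResource, pipelineResource));
boolean ready = multi.doCheckAndPublish(client); // false as soon as any resource fails; later ones are untouched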
|
|
@ -0,0 +1,78 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpHost;
import org.apache.lucene.util.SetOnce.AlreadySetException;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

/**
 * Tests {@link NodeFailureListener}.
 */
public class NodeFailureListenerTests extends ESTestCase {

    private final Sniffer sniffer = mock(Sniffer.class);
    private final HttpResource resource = new MockHttpResource(getTestName(), false);
    private final HttpHost host = new HttpHost("localhost", 9200);

    private final NodeFailureListener listener = new NodeFailureListener();

    public void testSetSnifferTwiceFails() {
        listener.setSniffer(sniffer);

        assertThat(listener.getSniffer(), is(sniffer));

        expectThrows(AlreadySetException.class, () -> listener.setSniffer(randomFrom(sniffer, null)));
    }

    public void testSetResourceTwiceFails() {
        listener.setResource(resource);

        assertThat(listener.getResource(), is(resource));

        expectThrows(AlreadySetException.class, () -> listener.setResource(randomFrom(resource, null)));
    }

    public void testSnifferNotifiedOnFailure() {
        listener.setSniffer(sniffer);

        listener.onFailure(host);

        verify(sniffer).sniffOnFailure(host);
    }

    public void testResourceNotifiedOnFailure() {
        listener.setResource(resource);

        listener.onFailure(host);

        assertTrue(resource.isDirty());
    }

    public void testResourceAndSnifferNotifiedOnFailure() {
        final HttpResource optionalResource = randomFrom(resource, null);
        final Sniffer optionalSniffer = randomFrom(sniffer, null);

        listener.setResource(optionalResource);
        listener.setSniffer(optionalSniffer);

        listener.onFailure(host);

        if (optionalResource != null) {
            assertTrue(resource.isDirty());
        }

        if (optionalSniffer != null) {
            verify(sniffer).sniffOnFailure(host);
        }
    }

}
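A sketch of the wiring these tests imply: the listener is attached once to a sniffer and/or a resource, and a node failure both re-sniffs the cluster and re-dirties the exporter's resources. Illustrative only:

NodeFailureListener listener = new NodeFailureListener();
listener.setSniffer(sniffer);    // optional and set-once; a second set throws AlreadySetException
listener.setResource(resource);  // optional and set-once
// on failure (invoked by the RestClient): listener.onFailure(host);
// -> sniffer.sniffOnFailure(host) and the resource is marked dirty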
|
|
@ -0,0 +1,72 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;

import java.io.IOException;
import java.io.InputStream;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.is;

/**
 * Tests {@link PipelineHttpResource}.
 */
public class PipelineHttpResourceTests extends AbstractPublishableHttpResourceTestCase {

    private final String pipelineName = ".my_pipeline";
    private final byte[] pipelineBytes = new byte[] { randomByte(), randomByte(), randomByte() };
    private final Supplier<byte[]> pipeline = () -> pipelineBytes;

    private final PipelineHttpResource resource = new PipelineHttpResource(owner, masterTimeout, pipelineName, pipeline);

    public void testPipelineToHttpEntity() throws IOException {
        final HttpEntity entity = resource.pipelineToHttpEntity();

        assertThat(entity.getContentType().getValue(), is(ContentType.APPLICATION_JSON.toString()));

        final InputStream byteStream = entity.getContent();

        assertThat(byteStream.available(), is(pipelineBytes.length));

        for (final byte pipelineByte : pipelineBytes) {
            assertThat(pipelineByte, is((byte)byteStream.read()));
        }

        assertThat(byteStream.available(), is(0));
    }

    public void testDoCheckTrue() throws IOException {
        assertCheckExists(resource, "/_ingest/pipeline", pipelineName);
    }

    public void testDoCheckFalse() throws IOException {
        assertCheckDoesNotExist(resource, "/_ingest/pipeline", pipelineName);
    }

    public void testDoCheckNullWithException() throws IOException {
        assertCheckWithException(resource, "/_ingest/pipeline", pipelineName);
    }

    public void testDoPublishTrue() throws IOException {
        assertPublishSucceeds(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
    }

    public void testDoPublishFalse() throws IOException {
        assertPublishFails(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
    }

    public void testDoPublishFalseWithException() throws IOException {
        assertPublishWithException(resource, "/_ingest/pipeline", pipelineName, ByteArrayEntity.class);
    }

    public void testParameters() {
        assertParameters(resource);
    }

}
|
|
@ -0,0 +1,189 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse;

import org.mockito.ArgumentCaptor;

import java.io.IOException;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.is;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

/**
 * Tests {@link PublishableHttpResource}.
 */
public class PublishableHttpResourceTests extends AbstractPublishableHttpResourceTestCase {

    private final String ownerType = "ownerthing";
    private final String resourceBasePath = "/_fake";
    private final String resourceName = ".my_thing";
    private final String resourceType = "thingamajig";
    private final Logger logger = mock(Logger.class);
    private final HttpEntity entity = mock(HttpEntity.class);
    private final Supplier<HttpEntity> body = () -> entity;

    private final PublishableHttpResource resource =
            new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS);

    public void testCheckForResourceExists() throws IOException {
        assertCheckForResource(successfulCheckStatus(), CheckResponse.EXISTS, "{} [{}] found on the [{}] {}");
    }

    public void testCheckForResourceDoesNotExist() throws IOException {
        assertCheckForResource(notFoundCheckStatus(), CheckResponse.DOES_NOT_EXIST, "{} [{}] does not exist on the [{}] {}");
    }

    public void testCheckForResourceUnexpectedResponse() throws IOException {
        final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
        final RestStatus failedStatus = failedCheckStatus();
        final Response response = response("GET", endpoint, failedStatus);

        when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);

        assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
                   is(CheckResponse.ERROR));

        verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
        verify(client).performRequest("GET", endpoint, resource.getParameters());
        verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), any(ResponseException.class));

        verifyNoMoreInteractions(client, logger);
    }

    public void testCheckForResourceErrors() throws IOException {
        final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
        final RestStatus failedStatus = failedCheckStatus();
        final ResponseException responseException = responseException("GET", endpoint, failedStatus);
        final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"), responseException);

        when(client.performRequest("GET", endpoint, resource.getParameters())).thenThrow(e);

        assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
                   is(CheckResponse.ERROR));

        verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
        verify(client).performRequest("GET", endpoint, resource.getParameters());
        verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), eq(e));

        verifyNoMoreInteractions(client, logger);
    }

    public void testPutResourceTrue() throws IOException {
        assertPutResource(successfulPublishStatus(), true);
    }

    public void testPutResourceFalse() throws IOException {
        assertPutResource(failedPublishStatus(), false);
    }

    public void testPutResourceFalseWithException() throws IOException {
        final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
        final Exception e = randomFrom(new IOException("expected"), new RuntimeException("expected"));

        when(client.performRequest("PUT", endpoint, resource.getParameters(), entity)).thenThrow(e);

        assertThat(resource.putResource(client, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType), is(false));

        verify(logger).trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, owner, ownerType);
        verify(client).performRequest("PUT", endpoint, resource.getParameters(), entity);
        verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), eq(e));

        verifyNoMoreInteractions(client, logger);
    }

    public void testParameters() {
        assertParameters(resource);
    }

    public void testDoCheckAndPublishIgnoresPublishWhenCheckErrors() {
        final PublishableHttpResource resource =
                new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS, CheckResponse.ERROR, true);

        assertThat(resource.doCheckAndPublish(client), is(false));
    }

    public void testDoCheckAndPublish() {
        // not an error (the third state)
        final PublishableHttpResource.CheckResponse exists = randomBoolean() ? CheckResponse.EXISTS : CheckResponse.DOES_NOT_EXIST;
        final boolean publish = randomBoolean();

        final PublishableHttpResource resource =
                new MockHttpResource(owner, masterTimeout, PublishableHttpResource.NO_BODY_PARAMETERS, exists, publish);

        assertThat(resource.doCheckAndPublish(client), is(exists == CheckResponse.EXISTS || publish));
    }

    private void assertCheckForResource(final RestStatus status, final CheckResponse expected, final String debugLogMessage)
            throws IOException {
        final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
        final Response response = response("GET", endpoint, status);

        when(client.performRequest("GET", endpoint, resource.getParameters())).thenReturn(response);

        assertThat(resource.checkForResource(client, logger, resourceBasePath, resourceName, resourceType, owner, ownerType),
                   is(expected));

        verify(logger).trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, owner, ownerType);
        verify(client).performRequest("GET", endpoint, resource.getParameters());

        if (expected == CheckResponse.EXISTS) {
            verify(response).getStatusLine();
        } else {
            // 3 times because it also is used in the exception message
            verify(response, times(3)).getStatusLine();
            verify(response, times(2)).getRequestLine();
            verify(response).getHost();
            verify(response).getEntity();
        }

        verify(logger).debug(debugLogMessage, resourceType, resourceName, owner, ownerType);

        verifyNoMoreInteractions(client, response, logger);
    }

    private void assertPutResource(final RestStatus status, final boolean expected) throws IOException {
        final String endpoint = concatenateEndpoint(resourceBasePath, resourceName);
        final Response response = response("PUT", endpoint, status);

        when(client.performRequest("PUT", endpoint, resource.getParameters(), entity)).thenReturn(response);

        assertThat(resource.putResource(client, logger, resourceBasePath, resourceName, body, resourceType, owner, ownerType),
                   is(expected));

        verify(client).performRequest("PUT", endpoint, resource.getParameters(), entity);
        verify(response).getStatusLine();

        verify(logger).trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, owner, ownerType);

        if (expected) {
            verify(logger).debug("{} [{}] uploaded to the [{}] {}", resourceType, resourceName, owner, ownerType);
        } else {
            ArgumentCaptor<RuntimeException> e = ArgumentCaptor.forClass(RuntimeException.class);

            verify(logger).error(any(org.apache.logging.log4j.util.Supplier.class), e.capture());

            assertThat(e.getValue().getMessage(),
                       is("[" + resourceBasePath + "/" + resourceName + "] responded with [" + status.getStatus() + "]"));
        }

        verifyNoMoreInteractions(client, response, logger, entity);
    }

}
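In pseudo-flow form, the check-then-publish contract these tests pin down (a comment sketch only):

// GET <resourceBasePath>/<resourceName>
//   200 -> CheckResponse.EXISTS          (nothing to publish)
//   404 -> CheckResponse.DOES_NOT_EXIST  (PUT the supplied body)
//   other status or exception -> CheckResponse.ERROR (fail; never attempt the PUT)
// so doCheckAndPublish(client) == (check == EXISTS) || (check == DOES_NOT_EXIST && putSucceeded)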
|
|
@ -0,0 +1,53 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.elasticsearch.test.ESTestCase;

import java.util.Locale;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;

/**
 * Tests {@link Scheme}.
 */
public class SchemeTests extends ESTestCase {

    public void testToString() {
        for (final Scheme scheme : Scheme.values()) {
            assertThat(scheme.toString(), equalTo(scheme.name().toLowerCase(Locale.ROOT)));
        }
    }

    public void testFromString() {
        for (final Scheme scheme : Scheme.values()) {
            assertThat(Scheme.fromString(scheme.name()), sameInstance(scheme));
            assertThat(Scheme.fromString(scheme.name().toLowerCase(Locale.ROOT)), sameInstance(scheme));
        }
    }

    public void testFromStringMalformed() {
        assertIllegalScheme("htp");
        assertIllegalScheme("htttp");
        assertIllegalScheme("httpd");
        assertIllegalScheme("ftp");
        assertIllegalScheme("ws");
        assertIllegalScheme("wss");
        assertIllegalScheme("gopher");
    }

    private void assertIllegalScheme(final String scheme) {
        try {
            Scheme.fromString(scheme);
            fail("scheme should be unknown: [" + scheme + "]");
        } catch (final IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString("[" + scheme + "]"));
        }
    }

}
|
|
@ -0,0 +1,53 @@
|
|||
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.test.ESTestCase;

import static org.mockito.Mockito.mock;

/**
 * Tests {@link SecurityHttpClientConfigCallback}.
 */
public class SecurityHttpClientConfigCallbackTests extends ESTestCase {

    private final CredentialsProvider credentialsProvider = mock(CredentialsProvider.class);
    private final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);

    /**
     * HttpAsyncClientBuilder's methods are {@code final} and therefore not verifiable.
     */
    private final HttpAsyncClientBuilder builder = mock(HttpAsyncClientBuilder.class);

    public void testSSLIOSessionStrategyNullThrowsException() {
        final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);

        expectThrows(NullPointerException.class, () -> new SecurityHttpClientConfigCallback(null, optionalCredentialsProvider));
    }

    public void testCustomizeHttpClient() {
        final SecurityHttpClientConfigCallback callback = new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider);

        assertSame(credentialsProvider, callback.getCredentialsProvider());
        assertSame(sslStrategy, callback.getSSLStrategy());

        assertSame(builder, callback.customizeHttpClient(builder));
    }

    public void testCustomizeHttpClientWithOptionalParameters() {
        final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);

        final SecurityHttpClientConfigCallback callback =
                new SecurityHttpClientConfigCallback(sslStrategy, optionalCredentialsProvider);

        assertSame(builder, callback.customizeHttpClient(builder));
        assertSame(optionalCredentialsProvider, callback.getCredentialsProvider());
        assertSame(sslStrategy, callback.getSSLStrategy());
    }

}
@ -0,0 +1,74 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

import java.io.IOException;
import java.io.InputStream;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.is;

/**
 * Tests {@link TemplateHttpResource}.
 */
public class TemplateHttpResourceTests extends AbstractPublishableHttpResourceTestCase {

    private final String templateName = ".my_template";
    private final String templateValue = "{\"template\":\".xyz-*\",\"mappings\":{}}";
    private final Supplier<String> template = () -> templateValue;

    private final TemplateHttpResource resource = new TemplateHttpResource(owner, masterTimeout, templateName, template);

    public void testPipelineToHttpEntity() throws IOException {
        final byte[] templateValueBytes = templateValue.getBytes(ContentType.APPLICATION_JSON.getCharset());
        final HttpEntity entity = resource.templateToHttpEntity();

        assertThat(entity.getContentType().getValue(), is(ContentType.APPLICATION_JSON.toString()));

        final InputStream byteStream = entity.getContent();

        assertThat(byteStream.available(), is(templateValueBytes.length));

        for (final byte templateByte : templateValueBytes) {
            assertThat(templateByte, is((byte)byteStream.read()));
        }

        assertThat(byteStream.available(), is(0));
    }

    public void testDoCheckTrue() throws IOException {
        assertCheckExists(resource, "/_template", templateName);
    }

    public void testDoCheckFalse() throws IOException {
        assertCheckDoesNotExist(resource, "/_template", templateName);
    }

    public void testDoCheckNullWithException() throws IOException {
        assertCheckWithException(resource, "/_template", templateName);
    }

    public void testDoPublishTrue() throws IOException {
        assertPublishSucceeds(resource, "/_template", templateName, StringEntity.class);
    }

    public void testDoPublishFalse() throws IOException {
        assertPublishFails(resource, "/_template", templateName, StringEntity.class);
    }

    public void testDoPublishFalseWithException() throws IOException {
        assertPublishWithException(resource, "/_template", templateName, StringEntity.class);
    }

    public void testParameters() {
        assertParameters(resource);
    }

}
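
The resource under test wraps a check-then-publish round trip against the _template endpoint. A rough sketch of the same flow done by hand with the low-level RestClient, under the assumption that a GET on a missing template surfaces as a ResponseException; the template name and body are placeholders:

    import java.util.Collections;
    import org.apache.http.HttpEntity;
    import org.apache.http.entity.ContentType;
    import org.apache.http.entity.StringEntity;
    import org.elasticsearch.client.ResponseException;

    // Sketch only: check for the template, publish it when absent.
    try {
        client.performRequest("GET", "/_template/.my_template");
    } catch (ResponseException e) {
        if (e.getResponse().getStatusLine().getStatusCode() == 404) {
            HttpEntity body = new StringEntity("{\"template\":\".xyz-*\",\"mappings\":{}}",
                    ContentType.APPLICATION_JSON);
            client.performRequest("PUT", "/_template/.my_template",
                    Collections.emptyMap(), body);
        }
    }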
@ -0,0 +1,70 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.client.config.RequestConfig;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import org.junit.Before;

import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
 * Tests {@link TimeoutRequestConfigCallback}.
 */
public class TimeoutRequestConfigCallbackTests extends ESTestCase {

    private final TimeValue connectTimeout = mock(TimeValue.class);
    private final int connectTimeoutMillis = randomInt();
    private final TimeValue socketTimeout = mock(TimeValue.class);
    private final int socketTimeoutMillis = randomInt();
    private final RequestConfig.Builder builder = mock(RequestConfig.Builder.class);

    @Before
    public void configureTimeouts() {
        when(connectTimeout.millis()).thenReturn((long)connectTimeoutMillis);
        when(socketTimeout.millis()).thenReturn((long)socketTimeoutMillis);
    }

    public void testCustomizeRequestConfig() {
        final TimeoutRequestConfigCallback callback = new TimeoutRequestConfigCallback(connectTimeout, socketTimeout);

        assertSame(builder, callback.customizeRequestConfig(builder));

        verify(builder).setConnectTimeout(connectTimeoutMillis);
        verify(builder).setSocketTimeout(socketTimeoutMillis);
    }

    public void testCustomizeRequestConfigWithOptionalParameters() {
        final TimeValue optionalConnectTimeout = randomFrom(connectTimeout, null);
        // avoid making both null at the same time
        final TimeValue optionalSocketTimeout = optionalConnectTimeout != null ? randomFrom(socketTimeout, null) : socketTimeout;

        final TimeoutRequestConfigCallback callback = new TimeoutRequestConfigCallback(optionalConnectTimeout, optionalSocketTimeout);

        assertSame(builder, callback.customizeRequestConfig(builder));
        assertSame(optionalConnectTimeout, callback.getConnectTimeout());
        assertSame(optionalSocketTimeout, callback.getSocketTimeout());

        if (optionalConnectTimeout != null) {
            verify(builder).setConnectTimeout(connectTimeoutMillis);
        } else {
            verify(builder, never()).setConnectTimeout(anyInt());
        }

        if (optionalSocketTimeout != null) {
            verify(builder).setSocketTimeout(socketTimeoutMillis);
        } else {
            verify(builder, never()).setSocketTimeout(anyInt());
        }
    }

}
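
Like the security callback above, the timeout callback plugs into the client builder; a brief hedged sketch with hypothetical timeout values:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.RestClient;

    // Sketch only: RequestConfig works in milliseconds, hence TimeValue.millis()
    // in the class under test.
    RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
            .setRequestConfigCallback(requestConfigBuilder -> requestConfigBuilder
                    .setConnectTimeout(5000)
                    .setSocketTimeout(60000))
            .build();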
@ -0,0 +1,99 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.exporter.http;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
 * Tests {@link VersionHttpResource}.
 */
public class VersionHttpResourceTests extends ESTestCase {

    private final String owner = getClass().getSimpleName();
    private final RestClient client = mock(RestClient.class);

    public void testDoCheckAndPublishSuccess() throws IOException {
        final Version minimumVersion =
                randomFrom(Version.V_2_0_0, Version.V_2_0_0_beta1, Version.V_2_0_0_rc1, Version.V_2_3_3, Version.CURRENT);
        final Version version = randomFrom(minimumVersion, Version.CURRENT);
        final Response response = responseForVersion(version);

        final VersionHttpResource resource = new VersionHttpResource(owner, minimumVersion);

        assertTrue(resource.doCheckAndPublish(client));

        verify(response).getEntity();
    }

    public void testDoCheckAndPublishFailedParsing() throws IOException {
        // malformed JSON
        final Response response = responseForJSON("{");

        final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);

        assertFalse(resource.doCheckAndPublish(client));

        verify(response).getEntity();
    }

    public void testDoCheckAndPublishFailedFieldMissing() throws IOException {
        // malformed response; imagining that we may change it in the future or someone breaks filter_path
        final Response response = responseForJSON("{\"version.number\":\"" + Version.CURRENT + "\"}");

        final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);

        assertFalse(resource.doCheckAndPublish(client));

        verify(response).getEntity();
    }

    public void testDoCheckAndPublishFailedFieldWrongType() throws IOException {
        // malformed response (should be {version: { number : ... }})
        final Response response = responseForJSON("{\"version\":\"" + Version.CURRENT + "\"}");

        final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);

        assertFalse(resource.doCheckAndPublish(client));

        verify(response).getEntity();
    }

    public void testDoCheckAndPublishFailedWithIOException() throws IOException {
        // request fails for some reason
        when(client.performRequest("GET", "/", VersionHttpResource.PARAMETERS)).thenThrow(new IOException("expected"));

        final VersionHttpResource resource = new VersionHttpResource(owner, Version.CURRENT);

        assertFalse(resource.doCheckAndPublish(client));
    }

    private Response responseForJSON(final String json) throws IOException {
        final StringEntity entity = new StringEntity(json, ContentType.APPLICATION_JSON);

        final Response response = mock(Response.class);
        when(response.getEntity()).thenReturn(entity);

        when(client.performRequest("GET", "/", VersionHttpResource.PARAMETERS)).thenReturn(response);

        return response;
    }

    private Response responseForVersion(final Version version) throws IOException {
        return responseForJSON("{\"version\":{\"number\":\"" + version + "\"}}");
    }

}
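
The happy-path test feeds the resource the filter_path-shaped body {"version":{"number":"..."}} and expects the check to pass when the reported version is at least the minimum. A simplified sketch of that comparison; the real resource parses the JSON properly, the regex here just keeps the sketch short, and meetsMinimumVersion is a hypothetical helper name:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;
    import org.elasticsearch.Version;

    // Sketch only: extract version.number and compare against the minimum.
    static boolean meetsMinimumVersion(String responseBody, Version minimum) {
        Matcher m = Pattern.compile("\"number\"\\s*:\\s*\"([^\"]+)\"").matcher(responseBody);
        return m.find() && Version.fromString(m.group(1)).onOrAfter(minimum);
    }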
@ -15,6 +15,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.license.License;
+import org.elasticsearch.xpack.monitoring.MonitoringFeatureSet;
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterInfoMonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolverTestCase;

@ -49,6 +50,7 @@ public class ClusterInfoResolverTests extends MonitoringIndexNameResolverTestCas
            doc.setClusterName(randomAsciiOfLength(5));
            doc.setClusterStats(new ClusterStatsResponse(Math.abs(randomLong()), ClusterName.CLUSTER_NAME_SETTING
                    .getDefault(Settings.EMPTY), randomAsciiOfLength(5), Collections.emptyList(), Collections.emptyList()));
+           doc.setUsage(Collections.singletonList(new MonitoringFeatureSet.Usage(randomBoolean(), randomBoolean(), emptyMap())));
            return doc;
        } catch (Exception e) {
            throw new IllegalStateException("Failed to generated random ClusterInfoMonitoringDoc", e);

@ -72,13 +74,14 @@ public class ClusterInfoResolverTests extends MonitoringIndexNameResolverTestCas
        assertThat(resolver.id(doc), equalTo(clusterUUID));

        assertSource(resolver.source(doc, XContentType.JSON),
-               Sets.newHashSet(
-                       "cluster_uuid",
-                       "timestamp",
-                       "source_node",
-                       "cluster_name",
-                       "version",
-                       "license",
-                       "cluster_stats"));
+               Sets.newHashSet(
+                       "cluster_uuid",
+                       "timestamp",
+                       "source_node",
+                       "cluster_name",
+                       "version",
+                       "license",
+                       "cluster_stats",
+                       "stack_stats.xpack"));
    }
}
@ -27,6 +27,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.isEmptyOrNullString;
import static org.hamcrest.Matchers.not;

@ -61,14 +62,14 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
        final String clusterUUID = client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID();
        assertTrue(Strings.hasText(clusterUUID));

-       logger.debug("--> waiting for the monitoring data index to be created (it should have been created by the ClusterInfoCollector)");
+       // waiting for the monitoring data index to be created (it should have been created by the ClusterInfoCollector
        String dataIndex = ".monitoring-data-" + MonitoringTemplateUtils.TEMPLATE_VERSION;
        awaitIndexExists(dataIndex);

-       logger.debug("--> waiting for cluster info collector to collect data");
+       // waiting for cluster info collector to collect data
        awaitMonitoringDocsCount(equalTo(1L), ClusterInfoResolver.TYPE);

-       logger.debug("--> retrieving cluster info document");
+       // retrieving cluster info document
        GetResponse response = client().prepareGet(dataIndex, ClusterInfoResolver.TYPE, clusterUUID).get();
        assertTrue("cluster_info document does not exist in data index", response.isExists());

@ -80,20 +81,19 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
        assertThat(source.get(MonitoringIndexNameResolver.Fields.CLUSTER_UUID), notNullValue());
        assertThat(source.get(MonitoringIndexNameResolver.Fields.TIMESTAMP), notNullValue());
        assertThat(source.get(MonitoringIndexNameResolver.Fields.SOURCE_NODE), notNullValue());
-       assertThat(source.get(ClusterInfoResolver.Fields.CLUSTER_NAME), equalTo(cluster().getClusterName()));
-       assertThat(source.get(ClusterInfoResolver.Fields.VERSION), equalTo(Version.CURRENT.toString()));
+       assertThat(source.get("cluster_name"), equalTo(cluster().getClusterName()));
+       assertThat(source.get("version"), equalTo(Version.CURRENT.toString()));

        logger.debug("--> checking that the document contains license information");
-       Object licenseObj = source.get(ClusterInfoResolver.Fields.LICENSE);
+       Object licenseObj = source.get("license");
        assertThat(licenseObj, instanceOf(Map.class));
        Map license = (Map) licenseObj;

        assertThat(license, instanceOf(Map.class));

-       String uid = (String) license.get(ClusterInfoResolver.Fields.UID);
+       String uid = (String) license.get("uid");
        assertThat(uid, not(isEmptyOrNullString()));

-       String type = (String) license.get(ClusterInfoResolver.Fields.TYPE);
+       String type = (String) license.get("type");
        assertThat(type, not(isEmptyOrNullString()));

        String status = (String) license.get(License.Fields.STATUS);

@ -103,7 +103,7 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
        assertThat(expiryDate, greaterThan(0L));

        // We basically recompute the hash here
-       String hkey = (String) license.get(ClusterInfoResolver.Fields.HKEY);
+       String hkey = (String) license.get("hkey");
        String recalculated = ClusterInfoResolver.hash(status, uid, type, String.valueOf(expiryDate), clusterUUID);
        assertThat(hkey, equalTo(recalculated));

@ -112,14 +112,30 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
        assertThat((Long) license.get(License.Fields.ISSUE_DATE_IN_MILLIS), greaterThan(0L));
        assertThat((Integer) license.get(License.Fields.MAX_NODES), greaterThan(0));

-       Object clusterStats = source.get(ClusterInfoResolver.Fields.CLUSTER_STATS);
+       Object clusterStats = source.get("cluster_stats");
        assertNotNull(clusterStats);
        assertThat(clusterStats, instanceOf(Map.class));
        assertThat(((Map) clusterStats).size(), greaterThan(0));

+       Object stackStats = source.get("stack_stats");
+       assertNotNull(stackStats);
+       assertThat(stackStats, instanceOf(Map.class));
+       assertThat(((Map) stackStats).size(), equalTo(1));
+
+       Object xpack = ((Map)stackStats).get("xpack");
+       assertNotNull(xpack);
+       assertThat(xpack, instanceOf(Map.class));
+       // it must have at least monitoring, but others may be hidden
+       assertThat(((Map) xpack).size(), greaterThanOrEqualTo(1));
+
+       Object monitoring = ((Map)xpack).get("monitoring");
+       assertNotNull(monitoring);
+       // we don't make any assumptions about what's in it, only that it's there
+       assertThat(monitoring, instanceOf(Map.class));
+
        waitForMonitoringTemplates();

-       logger.debug("--> check that the cluster_info is not indexed");
+       // check that the cluster_info is not indexed
        securedFlush();
        securedRefresh();

@ -131,8 +147,7 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
                        .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.ACTIVE.label()))
                        .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.INVALID.label()))
                        .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.EXPIRED.label()))
-                       .should(QueryBuilders.matchQuery(ClusterInfoResolver.Fields.CLUSTER_NAME,
-                               cluster().getClusterName()))
+                       .should(QueryBuilders.matchQuery("cluster_name", cluster().getClusterName()))
                        .minimumNumberShouldMatch(1)
                ).get(), 0L);
    }
@ -112,11 +112,12 @@ public class ClusterStatsResolverTests extends MonitoringIndexNameResolverTestCa
        BoundTransportAddress transportAddress = new BoundTransportAddress(new TransportAddress[]{LocalTransportAddress.buildUnique()},
                LocalTransportAddress.buildUnique());
        return new NodeInfo(Version.CURRENT, org.elasticsearch.Build.CURRENT,
-               new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
-               Settings.EMPTY, DummyOsInfo.INSTANCE, new ProcessInfo(randomInt(), randomBoolean()), JvmInfo.jvmInfo(),
+               new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), Settings.EMPTY,
+               DummyOsInfo.INSTANCE, new ProcessInfo(randomInt(), randomBoolean(), randomPositiveLong()), JvmInfo.jvmInfo(),
                new ThreadPoolInfo(Collections.singletonList(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5))),
                new TransportInfo(transportAddress, Collections.emptyMap()), new HttpInfo(transportAddress, randomLong()),
-               new PluginsAndModules(), new IngestInfo(Collections.emptyList()), new ByteSizeValue(randomIntBetween(1, 1024)));
+               new PluginsAndModules(Collections.emptyList(), Collections.emptyList()),
+               new IngestInfo(Collections.emptyList()), new ByteSizeValue(randomIntBetween(1, 1024)));

    }
@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentType;

@ -81,6 +82,19 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
        if (Constants.WINDOWS && field.startsWith("node_stats.os.cpu.load_average")) {
            return;
        }

+       // we only report IoStats and spins on Linux
+       if (Constants.LINUX == false) {
+           if (field.startsWith("node_stats.fs.io_stats")) {
+               return;
+           }
+       }
+
+       // node_stats.fs.data.spins can be null and it's only reported on Linux
+       if (field.startsWith("node_stats.fs.data.spins")) {
+           return;
+       }
+
        super.assertSourceField(field, sourceFields);
    }

@ -140,6 +154,22 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
                new NodeIndicesStats(new CommonStats(), statsByShard), OsProbe.getInstance().osStats(),
                ProcessProbe.getInstance().processStats(), JvmStats.jvmStats(),
                new ThreadPoolStats(threadPoolStats),
-               new FsInfo(0, null, pathInfo), null, null, null, null, null, null);
+               new FsInfo(0, randomIoStats(), pathInfo), null, null, null, null, null, null);
    }

+   @Nullable
+   private FsInfo.IoStats randomIoStats() {
+       if (Constants.LINUX) {
+           final int stats = randomIntBetween(1, 3);
+           final FsInfo.DeviceStats[] devices = new FsInfo.DeviceStats[stats];
+
+           for (int i = 0; i < devices.length; ++i) {
+               devices[i] = new FsInfo.DeviceStats(253, 0, "dm-" + i, 287734, 7185242, 8398869, 118857776, null);
+           }
+
+           return new FsInfo.IoStats(devices);
+       }
+
+       return null;
+   }
}
@ -7,7 +7,7 @@ package org.elasticsearch.xpack.monitoring.security;

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionRequestBuilder;
-import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestStatus;

@ -15,7 +15,7 @@ import org.elasticsearch.xpack.monitoring.MonitoringSettings;
import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.elasticsearch.xpack.security.InternalClient;

-import java.util.ArrayList;
+import java.util.stream.Collectors;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.is;

@ -86,7 +86,7 @@ public class MonitoringInternalClientTests extends MonitoringIntegTestCase {
     * @return the source of a random monitoring template
     */
    private String randomTemplateSource() {
-       return randomFrom(new ArrayList<>(monitoringTemplates().values()));
+       return randomFrom(monitoringTemplates().stream().map(Tuple::v2).collect(Collectors.toList()));
    }
}
@ -1,43 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.monitoring.support;

import org.elasticsearch.Version;
import org.elasticsearch.test.ESTestCase;

import java.nio.charset.StandardCharsets;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;

public class VersionUtilsTests extends ESTestCase {

    public void testParseVersion() {
        List<Version> versions = randomSubsetOf(9, Version.V_2_0_0_beta1, Version.V_2_0_0_beta2, Version.V_2_0_0_rc1, Version.V_2_0_0,
                Version.V_2_0_1, Version.V_2_0_2, Version.V_2_1_0, Version.V_2_1_1, Version.V_2_1_2, Version.V_2_2_0, Version.V_2_3_0,
                Version.V_5_0_0_alpha1);
        for (Version version : versions) {
            String output = createOutput(VersionUtils.VERSION_NUMBER_FIELD, version.toString());
            assertThat(VersionUtils.parseVersion(output.getBytes(StandardCharsets.UTF_8)), equalTo(version));
            assertThat(VersionUtils.parseVersion(VersionUtils.VERSION_NUMBER_FIELD, output), equalTo(version));
        }
    }

    private String createOutput(String fieldName, String value) {
        return "{\n" +
                "  \"name\" : \"Blind Faith\",\n" +
                "  \"cluster_name\" : \"elasticsearch\",\n" +
                "  \"version\" : {\n" +
                "    \"" + fieldName + "\" : \"" + value + "\",\n" +
                "    \"build_hash\" : \"4092d253dddda0ff1ff3d1c09ac7678e757843f9\",\n" +
                "    \"build_timestamp\" : \"2015-10-13T08:53:10Z\",\n" +
                "    \"build_snapshot\" : true,\n" +
                "    \"lucene_version\" : \"5.2.1\"\n" +
                "  },\n" +
                "  \"tagline\" : \"You Know, for Search\"\n" +
                "}\n";
    }
}
@ -10,6 +10,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.regex.Regex;

@ -54,6 +55,7 @@ import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

@ -170,7 +172,7 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {

    @Override
    protected Set<String> excludeTemplates() {
-       return monitoringTemplates().keySet();
+       return monitoringTemplateNames();
    }

    @Before

@ -278,9 +280,17 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
        }
    }

-   protected Map<String, String> monitoringTemplates() {
+   protected List<Tuple<String, String>> monitoringTemplates() {
        return StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
-               .collect(Collectors.toMap(MonitoringIndexNameResolver::templateName, MonitoringIndexNameResolver::template, (a, b) -> a));
+               .map((resolver) -> new Tuple<>(resolver.templateName(), resolver.template()))
+               .distinct()
+               .collect(Collectors.toList());
    }

+   protected Set<String> monitoringTemplateNames() {
+       return StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
+               .map(MonitoringIndexNameResolver::templateName)
+               .collect(Collectors.toSet());
+   }
+
    protected void assertTemplateInstalled(String name) {

@ -303,7 +313,7 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
    }

    protected void waitForMonitoringTemplates() throws Exception {
-       assertBusy(() -> monitoringTemplates().keySet().forEach(this::assertTemplateInstalled), 30, TimeUnit.SECONDS);
+       assertBusy(() -> monitoringTemplateNames().forEach(this::assertTemplateInstalled), 30, TimeUnit.SECONDS);
    }

    protected void waitForMonitoringIndices() throws Exception {

@ -519,9 +529,6 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
            "\n" +
            "admin:\n" +
            " cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
-           "transport_client:\n" +
-           " cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
-           "\n" +
            "monitor:\n" +
            " cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n"
            ;
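
The monitoringTemplates() change swaps a name-keyed Map (which silently collapsed resolvers sharing a template name via the (a, b) -> a merge) for a List of Tuples deduplicated with distinct(). A toy sketch of why that works, assuming Tuple's equals/hashCode covers both values; the names and bodies here are made up:

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;
    import org.elasticsearch.common.collect.Tuple;

    // Sketch only: exact (name, body) duplicates collapse, but two entries that
    // share a name with different bodies both survive, unlike with toMap.
    List<Tuple<String, String>> templates = Stream.of(
            new Tuple<>(".monitoring-data", "{}"),
            new Tuple<>(".monitoring-data", "{}"),
            new Tuple<>(".monitoring-es", "{\"order\":0}"))
            .distinct()
            .collect(Collectors.toList());
    // templates.size() == 2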
@ -3,7 +3,7 @@ appender.audit_rolling.name = audit_rolling
appender.audit_rolling.fileName = ${sys:es.logs}_access.log
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = [%d{ISO8601}] %m%n
-appender.audit_rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
+appender.audit_rolling.filePattern = ${sys:es.logs}_access-%d{yyyy-MM-dd}.log
appender.audit_rolling.policies.type = Policies
appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.audit_rolling.policies.time.interval = 1
@ -51,11 +51,13 @@ import org.elasticsearch.xpack.security.action.user.ChangePasswordAction;
import org.elasticsearch.xpack.security.action.user.DeleteUserAction;
import org.elasticsearch.xpack.security.action.user.GetUsersAction;
import org.elasticsearch.xpack.security.action.user.PutUserAction;
+import org.elasticsearch.xpack.security.action.user.SetEnabledAction;
import org.elasticsearch.xpack.security.action.user.TransportAuthenticateAction;
import org.elasticsearch.xpack.security.action.user.TransportChangePasswordAction;
import org.elasticsearch.xpack.security.action.user.TransportDeleteUserAction;
import org.elasticsearch.xpack.security.action.user.TransportGetUsersAction;
import org.elasticsearch.xpack.security.action.user.TransportPutUserAction;
+import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction;
import org.elasticsearch.xpack.security.audit.AuditTrail;
import org.elasticsearch.xpack.security.audit.AuditTrailService;
import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail;

@ -96,6 +98,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestChangePasswordActio
import org.elasticsearch.xpack.security.rest.action.user.RestDeleteUserAction;
import org.elasticsearch.xpack.security.rest.action.user.RestGetUsersAction;
import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction;
+import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction;
import org.elasticsearch.xpack.security.transport.SecurityServerTransportService;
import org.elasticsearch.xpack.security.transport.filter.IPFilter;
import org.elasticsearch.xpack.security.transport.netty3.SecurityNetty3HttpServerTransport;

@ -219,15 +222,15 @@ public class Security implements ActionPlugin, IngestPlugin {
        if (enabled == false) {
            return Collections.emptyList();
        }
-       AnonymousUser.initialize(settings); // TODO: this is sketchy...testing is difficult b/c it is static....

        List<Object> components = new ArrayList<>();
        final SecurityContext securityContext = new SecurityContext(settings, threadPool, cryptoService);
        components.add(securityContext);

        // realms construction
-       final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, threadPool);
-       final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore);
+       final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client);
+       final AnonymousUser anonymousUser = new AnonymousUser(settings);
+       final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore, anonymousUser);
        Map<String, Realm.Factory> realmFactories = new HashMap<>();
        realmFactories.put(FileRealm.TYPE, config -> new FileRealm(config, resourceWatcherService));
        realmFactories.put(NativeRealm.TYPE, config -> new NativeRealm(config, nativeUsersStore));

@ -246,6 +249,7 @@ public class Security implements ActionPlugin, IngestPlugin {
        final Realms realms = new Realms(settings, env, realmFactories, licenseState, reservedRealm);
        components.add(nativeUsersStore);
        components.add(realms);
+       components.add(reservedRealm);

        // audit trails construction
        IndexAuditTrail indexAuditTrail = null;

@ -294,7 +298,7 @@ public class Security implements ActionPlugin, IngestPlugin {
        }

        final AuthenticationService authcService = new AuthenticationService(settings, realms, auditTrailService,
-               cryptoService, failureHandler, threadPool);
+               cryptoService, failureHandler, threadPool, anonymousUser);
        components.add(authcService);

        final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService);

@ -302,7 +306,7 @@ public class Security implements ActionPlugin, IngestPlugin {
        final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(securityContext);
        final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore);
        final AuthorizationService authzService = new AuthorizationService(settings, allRolesStore, clusterService,
-               auditTrailService, failureHandler, threadPool);
+               auditTrailService, failureHandler, threadPool, anonymousUser);
        components.add(fileRolesStore); // has lifecycle
        components.add(nativeRolesStore); // used by roles actions
        components.add(reservedRolesStore); // used by roles actions

@ -458,7 +462,8 @@ public class Security implements ActionPlugin, IngestPlugin {
                new ActionHandler<>(PutRoleAction.INSTANCE, TransportPutRoleAction.class),
                new ActionHandler<>(DeleteRoleAction.INSTANCE, TransportDeleteRoleAction.class),
                new ActionHandler<>(ChangePasswordAction.INSTANCE, TransportChangePasswordAction.class),
-               new ActionHandler<>(AuthenticateAction.INSTANCE, TransportAuthenticateAction.class));
+               new ActionHandler<>(AuthenticateAction.INSTANCE, TransportAuthenticateAction.class),
+               new ActionHandler<>(SetEnabledAction.INSTANCE, TransportSetEnabledAction.class));
    }

    @Override

@ -487,7 +492,8 @@ public class Security implements ActionPlugin, IngestPlugin {
                RestGetRolesAction.class,
                RestPutRoleAction.class,
                RestDeleteRoleAction.class,
-               RestChangePasswordAction.class);
+               RestChangePasswordAction.class,
+               RestSetEnabledAction.class);
    }

    @Override
@ -94,7 +94,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
        Map<String, Object> auditUsage = auditUsage(auditTrailService);
        Map<String, Object> ipFilterUsage = ipFilterUsage(ipFilter);
        Map<String, Object> systemKeyUsage = systemKeyUsage(cryptoService);
-       Map<String, Object> anonymousUsage = Collections.singletonMap("enabled", AnonymousUser.enabled());
+       Map<String, Object> anonymousUsage = Collections.singletonMap("enabled", AnonymousUser.isAnonymousEnabled(settings));
        return new Usage(available(), enabled(), realmsUsage, rolesStoreUsage, sslUsage, auditUsage, ipFilterUsage, systemKeyUsage,
                anonymousUsage);
    }
@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.action.role;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -17,14 +18,25 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
 * A request delete a role from the security index
 */
-public class DeleteRoleRequest extends ActionRequest<DeleteRoleRequest> {
+public class DeleteRoleRequest extends ActionRequest<DeleteRoleRequest> implements WriteRequest<DeleteRoleRequest> {

    private String name;
-   private boolean refresh = true;
+   private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE;

    public DeleteRoleRequest() {
    }

+   @Override
+   public DeleteRoleRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
+       this.refreshPolicy = refreshPolicy;
+       return this;
+   }
+
+   @Override
+   public RefreshPolicy getRefreshPolicy() {
+       return refreshPolicy;
+   }
+
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;

@ -42,25 +54,17 @@ public class DeleteRoleRequest extends ActionRequest<DeleteRoleRequest> {
        return name;
    }

-   public void refresh(boolean refresh) {
-       this.refresh = refresh;
-   }
-
-   public boolean refresh() {
-       return refresh;
-   }
-
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        name = in.readString();
-       refresh = in.readBoolean();
+       refreshPolicy = RefreshPolicy.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(name);
-       out.writeBoolean(refresh);
+       refreshPolicy.writeTo(out);
    }
}
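
Throughout this commit the boolean refresh flag is replaced by WriteRequest.RefreshPolicy, which serializes over the wire (the readFrom/writeTo changes above) and distinguishes three behaviors. A small usage sketch, not from this commit, with a hypothetical role name:

    import org.elasticsearch.action.support.WriteRequest;

    // Sketch only: NONE skips the refresh, IMMEDIATE (the default here) refreshes
    // right away, WAIT_UNTIL blocks until a scheduled refresh exposes the change.
    DeleteRoleRequest request = new DeleteRoleRequest();
    request.name("logstash_writer");
    request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);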
@ -6,12 +6,14 @@
package org.elasticsearch.xpack.security.action.role;

import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * A builder for requests to delete a role from the security index
 */
-public class DeleteRoleRequestBuilder extends ActionRequestBuilder<DeleteRoleRequest, DeleteRoleResponse, DeleteRoleRequestBuilder> {
+public class DeleteRoleRequestBuilder extends ActionRequestBuilder<DeleteRoleRequest, DeleteRoleResponse, DeleteRoleRequestBuilder>
+       implements WriteRequestBuilder<DeleteRoleRequestBuilder> {

    public DeleteRoleRequestBuilder(ElasticsearchClient client) {
        this(client, DeleteRoleAction.INSTANCE);

@ -25,9 +27,4 @@ public class DeleteRoleRequestBuilder extends ActionRequestBuilder<DeleteRoleReq
        request.name(name);
        return this;
    }
-
-   public DeleteRoleRequestBuilder refresh(boolean refresh) {
-       request.refresh(refresh);
-       return this;
-   }
}
@ -30,10 +30,7 @@ public class AuthenticateRequest extends ActionRequest<AuthenticateRequest> impl

    @Override
    public ActionRequestValidationException validate() {
-       Validation.Error error = Validation.Users.validateUsername(username);
-       if (error != null) {
-           return addValidationError(error.toString(), null);
-       }
+       // we cannot apply our validation rules here as an authenticate request could be for an LDAP user that doesn't fit our restrictions
        return null;
    }
@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -17,10 +18,10 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
 * A request to delete a native user.
 */
-public class DeleteUserRequest extends ActionRequest<DeleteUserRequest> implements UserRequest {
+public class DeleteUserRequest extends ActionRequest<DeleteUserRequest> implements UserRequest, WriteRequest<DeleteUserRequest> {

    private String username;
-   private boolean refresh = true;
+   private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE;

    public DeleteUserRequest() {
    }

@ -29,6 +30,17 @@ public class DeleteUserRequest extends ActionRequest<DeleteUserRequest> implemen
        this.username = username;
    }

+   @Override
+   public DeleteUserRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
+       this.refreshPolicy = refreshPolicy;
+       return this;
+   }
+
+   @Override
+   public RefreshPolicy getRefreshPolicy() {
+       return refreshPolicy;
+   }
+
    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;

@ -42,18 +54,10 @@ public class DeleteUserRequest extends ActionRequest<DeleteUserRequest> implemen
        return this.username;
    }

-   public boolean refresh() {
-       return refresh;
-   }
-
    public void username(String username) {
        this.username = username;
    }

-   public void refresh(boolean refresh) {
-       this.refresh = refresh;
-   }
-
    @Override
    public String[] usernames() {
        return new String[] { username };

@ -63,14 +67,14 @@ public class DeleteUserRequest extends ActionRequest<DeleteUserRequest> implemen
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        username = in.readString();
-       refresh = in.readBoolean();
+       refreshPolicy = RefreshPolicy.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(username);
-       out.writeBoolean(refresh);
+       refreshPolicy.writeTo(out);
    }

}
@ -6,9 +6,11 @@
package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

-public class DeleteUserRequestBuilder extends ActionRequestBuilder<DeleteUserRequest, DeleteUserResponse, DeleteUserRequestBuilder> {
+public class DeleteUserRequestBuilder extends ActionRequestBuilder<DeleteUserRequest, DeleteUserResponse, DeleteUserRequestBuilder>
+       implements WriteRequestBuilder<DeleteUserRequestBuilder> {

    public DeleteUserRequestBuilder(ElasticsearchClient client) {
        this(client, DeleteUserAction.INSTANCE);

@ -22,9 +24,4 @@ public class DeleteUserRequestBuilder extends ActionRequestBuilder<DeleteUserReq
        request.username(username);
        return this;
    }
-
-   public DeleteUserRequestBuilder refresh(boolean refresh) {
-       request.refresh(refresh);
-       return this;
-   }
}
@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.security.authc.support.CharArrays;
+import org.elasticsearch.xpack.security.support.MetadataUtils;

import java.io.IOException;
import java.util.Map;

@ -46,6 +47,10 @@ public class PutUserRequest extends ActionRequest<PutUserRequest> implements Use
        if (roles == null) {
            validationException = addValidationError("roles are missing", validationException);
        }
+       if (metadata != null && MetadataUtils.containsReservedMetadata(metadata)) {
+           validationException = addValidationError("metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]",
+                   validationException);
+       }
        // we do not check for a password hash here since it is possible that the user exists and we don't want to update the password
        return validationException;
    }
@ -0,0 +1,32 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * This action is for setting the enabled flag on a native or reserved user
 */
public class SetEnabledAction extends Action<SetEnabledRequest, SetEnabledResponse, SetEnabledRequestBuilder> {

    public static final SetEnabledAction INSTANCE = new SetEnabledAction();
    public static final String NAME = "cluster:admin/xpack/security/user/set_enabled";

    private SetEnabledAction() {
        super(NAME);
    }

    @Override
    public SetEnabledRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new SetEnabledRequestBuilder(client);
    }

    @Override
    public SetEnabledResponse newResponse() {
        return new SetEnabledResponse();
    }
}
@ -0,0 +1,106 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.security.support.Validation.Error;
import org.elasticsearch.xpack.security.support.Validation.Users;

import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
 * The request that allows to set a user as enabled or disabled
 */
public class SetEnabledRequest extends ActionRequest<SetEnabledRequest> implements UserRequest, WriteRequest<SetEnabledRequest> {

    private Boolean enabled;
    private String username;
    private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE;

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        Error error = Users.validateUsername(username, true, Settings.EMPTY);
        if (error != null) {
            validationException = addValidationError(error.toString(), validationException);
        }
        if (enabled == null) {
            validationException = addValidationError("enabled must be set", validationException);
        }
        return validationException;
    }

    /**
     * @return whether the user should be set to enabled or not
     */
    public Boolean enabled() {
        return enabled;
    }

    /**
     * Set whether the user should be enabled or not.
     */
    public void enabled(boolean enabled) {
        this.enabled = enabled;
    }

    /**
     * @return the username that this request applies to.
     */
    public String username() {
        return username;
    }

    /**
     * Set the username that the request applies to. Must not be {@code null}
     */
    public void username(String username) {
        this.username = username;
    }

    @Override
    public String[] usernames() {
        return new String[] { username };
    }

    /**
     * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh (
     * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}).
     */
    @Override
    public RefreshPolicy getRefreshPolicy() {
        return refreshPolicy;
    }

    @Override
    public SetEnabledRequest setRefreshPolicy(RefreshPolicy refreshPolicy) {
        this.refreshPolicy = refreshPolicy;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        this.enabled = in.readBoolean();
        this.username = in.readString();
        this.refreshPolicy = RefreshPolicy.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(enabled);
        out.writeString(username);
        refreshPolicy.writeTo(out);
    }
}
@ -0,0 +1,37 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Request builder for setting a user as enabled or disabled
 */
public class SetEnabledRequestBuilder extends ActionRequestBuilder<SetEnabledRequest, SetEnabledResponse, SetEnabledRequestBuilder>
        implements WriteRequestBuilder<SetEnabledRequestBuilder> {

    public SetEnabledRequestBuilder(ElasticsearchClient client) {
        super(client, SetEnabledAction.INSTANCE, new SetEnabledRequest());
    }

    /**
     * Set the username of the user that should enabled or disabled. Must not be {@code null}
     */
    public SetEnabledRequestBuilder username(String username) {
        request.username(username);
        return this;
    }

    /**
     * Set whether the user should be enabled or not
     */
    public SetEnabledRequestBuilder enabled(boolean enabled) {
        request.enabled(enabled);
        return this;
    }
}
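
Putting the new pieces together, a hedged sketch of how a caller might disable a native user through this builder; the client instance and username are hypothetical:

    import org.elasticsearch.action.support.WriteRequest;
    import org.elasticsearch.client.ElasticsearchClient;

    // Sketch only: build, configure, and execute a set-enabled request.
    SetEnabledResponse response = new SetEnabledRequestBuilder(client)
            .username("jacknich")
            .enabled(false)
            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
            .get();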
@ -0,0 +1,14 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.action.user;

import org.elasticsearch.action.ActionResponse;

/**
 * Empty response for a {@link SetEnabledRequest}
 */
public class SetEnabledResponse extends ActionResponse {
}
@ -17,6 +17,7 @@ import org.elasticsearch.xpack.security.user.SystemUser;
|
|||
import org.elasticsearch.xpack.security.user.User;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.xpack.security.user.XPackUser;
|
||||
|
||||
/**
|
||||
*/
|
||||
|
@ -36,7 +37,7 @@ public class TransportAuthenticateAction extends HandledTransportAction<Authenti
|
|||
@Override
|
||||
protected void doExecute(AuthenticateRequest request, ActionListener<AuthenticateResponse> listener) {
|
||||
final User user = securityContext.getUser();
|
||||
if (SystemUser.is(user)) {
|
||||
if (SystemUser.is(user) || XPackUser.is(user)) {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + user.principal() + "] is internal"));
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -16,6 +16,7 @@ import org.elasticsearch.xpack.security.user.AnonymousUser;
|
|||
import org.elasticsearch.xpack.security.user.SystemUser;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.xpack.security.user.XPackUser;
|
||||
|
||||
/**
|
||||
*/
|
||||
|
@ -35,10 +36,10 @@ public class TransportChangePasswordAction extends HandledTransportAction<Change
|
|||
@Override
|
||||
protected void doExecute(ChangePasswordRequest request, ActionListener<ChangePasswordResponse> listener) {
|
||||
final String username = request.username();
|
||||
if (AnonymousUser.isAnonymousUsername(username)) {
|
||||
if (AnonymousUser.isAnonymousUsername(username, settings)) {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified via the API"));
|
||||
return;
|
||||
} else if (SystemUser.NAME.equals(username)) {
|
||||
} else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal"));
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ import org.elasticsearch.xpack.security.user.AnonymousUser;
|
|||
import org.elasticsearch.xpack.security.user.SystemUser;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.xpack.security.user.XPackUser;
|
||||
|
||||
public class TransportDeleteUserAction extends HandledTransportAction<DeleteUserRequest, DeleteUserResponse> {
|
||||
|
||||
|
@ -34,15 +35,15 @@ public class TransportDeleteUserAction extends HandledTransportAction<DeleteUser
|
|||
@Override
|
||||
protected void doExecute(DeleteUserRequest request, final ActionListener<DeleteUserResponse> listener) {
|
||||
final String username = request.username();
|
||||
if (ReservedRealm.isReserved(username)) {
|
||||
if (AnonymousUser.isAnonymousUsername(username)) {
|
||||
if (ReservedRealm.isReserved(username, settings)) {
|
||||
if (AnonymousUser.isAnonymousUsername(username, settings)) {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be deleted"));
|
||||
return;
|
||||
} else {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + username + "] is reserved and cannot be deleted"));
|
||||
return;
|
||||
}
|
||||
} else if (SystemUser.NAME.equals(username)) {
|
||||
} else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) {
|
||||
listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal"));
|
||||
return;
|
||||
}
|
||||
|
|
|
@@ -17,9 +17,9 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
 import org.elasticsearch.xpack.security.user.AnonymousUser;
 import org.elasticsearch.xpack.security.user.SystemUser;
 import org.elasticsearch.xpack.security.user.User;
 import org.elasticsearch.xpack.security.user.XPackUser;

 import java.util.ArrayList;
 import java.util.List;

@@ -29,14 +29,16 @@ import static org.elasticsearch.common.Strings.arrayToDelimitedString;
 public class TransportGetUsersAction extends HandledTransportAction<GetUsersRequest, GetUsersResponse> {

     private final NativeUsersStore usersStore;
+    private final ReservedRealm reservedRealm;

     @Inject
     public TransportGetUsersAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
                                    IndexNameExpressionResolver indexNameExpressionResolver, NativeUsersStore usersStore,
-                                   TransportService transportService) {
+                                   TransportService transportService, ReservedRealm reservedRealm) {
         super(settings, GetUsersAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
                 GetUsersRequest::new);
         this.usersStore = usersStore;
+        this.reservedRealm = reservedRealm;
     }

     @Override

@@ -48,16 +50,13 @@ public class TransportGetUsersAction extends HandledTransportAction<GetUsersRequ

         if (specificUsersRequested) {
             for (String username : requestedUsers) {
-                if (ReservedRealm.isReserved(username)) {
-                    User user = ReservedRealm.getUser(username);
+                if (ReservedRealm.isReserved(username, settings)) {
+                    User user = reservedRealm.lookupUser(username);
+                    // a user could be null if the service isn't ready or we requested the anonymous user and it is not enabled
                     if (user != null) {
                         users.add(user);
-                    } else {
-                        // the only time a user should be null is if username matches for the anonymous user and the anonymous user is not
-                        // enabled!
-                        assert AnonymousUser.enabled() == false && AnonymousUser.isAnonymousUsername(username);
                     }
-                } else if (SystemUser.NAME.equals(username)) {
+                } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) {
                     listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal"));
                     return;
                 } else {

@@ -65,7 +64,7 @@ public class TransportGetUsersAction extends HandledTransportAction<GetUsersRequ
             }
         } else {
-            users.addAll(ReservedRealm.users());
+            users.addAll(reservedRealm.users());
         }

         if (usersToSearchFor.size() == 1) {

@@ -19,6 +19,7 @@ import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
 import org.elasticsearch.xpack.security.user.AnonymousUser;
 import org.elasticsearch.xpack.security.user.SystemUser;
+import org.elasticsearch.xpack.security.user.XPackUser;

 public class TransportPutUserAction extends HandledTransportAction<PutUserRequest, PutUserResponse> {

@@ -35,8 +36,8 @@ public class TransportPutUserAction extends HandledTransportAction<PutUserReques
     @Override
     protected void doExecute(final PutUserRequest request, final ActionListener<PutUserResponse> listener) {
         final String username = request.username();
-        if (ReservedRealm.isReserved(username)) {
-            if (AnonymousUser.isAnonymousUsername(username)) {
+        if (ReservedRealm.isReserved(username, settings)) {
+            if (AnonymousUser.isAnonymousUsername(username, settings)) {
                 listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified via the API"));
                 return;
             } else {

@@ -44,7 +45,7 @@ public class TransportPutUserAction extends HandledTransportAction<PutUserReques
                     "password can be changed"));
             return;
         }
-        } else if (SystemUser.NAME.equals(username)) {
+        } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) {
             listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal"));
             return;
         }

@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.action.user;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.security.authc.Authentication;
+import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
+import org.elasticsearch.xpack.security.user.AnonymousUser;
+import org.elasticsearch.xpack.security.user.SystemUser;
+import org.elasticsearch.xpack.security.user.XPackUser;
+
+/**
+ * Transport action that handles setting a native or reserved user to enabled
+ */
+public class TransportSetEnabledAction extends HandledTransportAction<SetEnabledRequest, SetEnabledResponse> {
+
+    private final NativeUsersStore usersStore;
+
+    @Inject
+    public TransportSetEnabledAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                     ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+                                     NativeUsersStore usersStore) {
+        super(settings, SetEnabledAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+                SetEnabledRequest::new);
+        this.usersStore = usersStore;
+    }
+
+    @Override
+    protected void doExecute(SetEnabledRequest request, ActionListener<SetEnabledResponse> listener) {
+        final String username = request.username();
+        // make sure the user is not disabling themselves
+        if (Authentication.getAuthentication(threadPool.getThreadContext()).getRunAsUser().principal().equals(request.username())) {
+            listener.onFailure(new IllegalArgumentException("users may not update the enabled status of their own account"));
+            return;
+        } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) {
+            listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal"));
+            return;
+        } else if (AnonymousUser.isAnonymousUsername(username, settings)) {
+            listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified using the api"));
+            return;
+        }
+
+        usersStore.setEnabled(username, request.enabled(), request.getRefreshPolicy(), new ActionListener<Void>() {
+            @Override
+            public void onResponse(Void v) {
+                listener.onResponse(new SetEnabledResponse());
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
+            }
+        });
+    }
+}

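For reference, the new action's doExecute validates in a fixed order: the caller's own account first, then the internal users, then the anonymous user, and only then hands off to the store. The following is a hypothetical, self-contained restatement of that guard chain; the helper name and the Set-based parameters are illustrative assumptions, not part of the commit:

    import java.util.Set;

    final class SetEnabledGuard {
        // Mirrors the validation order in TransportSetEnabledAction#doExecute above:
        // self-update first, then internal users, then the anonymous user.
        // Returns an error message, or null when the change may proceed.
        static String validate(String caller, String target, Set<String> internalUsernames, String anonymousUsername) {
            if (caller.equals(target)) {
                return "users may not update the enabled status of their own account";
            } else if (internalUsernames.contains(target)) {
                return "user [" + target + "] is internal";
            } else if (target.equals(anonymousUsername)) {
                return "user [" + target + "] is anonymous and cannot be modified using the api";
            }
            return null; // safe to call NativeUsersStore#setEnabled
        }
    }
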
@@ -3,17 +3,18 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.security.audit.index;
+package org.elasticsearch.xpack.security.audit;

 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Locale;

-public enum IndexAuditLevel {
+public enum AuditLevel {

     ANONYMOUS_ACCESS_DENIED,
     AUTHENTICATION_FAILED,
+    REALM_AUTHENTICATION_FAILED,
     ACCESS_GRANTED,
     ACCESS_DENIED,
     TAMPERED_REQUEST,

@@ -23,13 +24,13 @@ public enum IndexAuditLevel {
     RUN_AS_GRANTED,
     RUN_AS_DENIED;

-    static EnumSet<IndexAuditLevel> parse(List<String> levels) {
-        EnumSet<IndexAuditLevel> enumSet = EnumSet.noneOf(IndexAuditLevel.class);
+    static EnumSet<AuditLevel> parse(List<String> levels) {
+        EnumSet<AuditLevel> enumSet = EnumSet.noneOf(AuditLevel.class);
         for (String level : levels) {
             String lowerCaseLevel = level.trim().toLowerCase(Locale.ROOT);
             switch (lowerCaseLevel) {
                 case "_all":
-                    enumSet.addAll(Arrays.asList(IndexAuditLevel.values()));
+                    enumSet.addAll(Arrays.asList(AuditLevel.values()));
                     break;
                 case "anonymous_access_denied":
                     enumSet.add(ANONYMOUS_ACCESS_DENIED);

@@ -37,6 +38,9 @@ public enum IndexAuditLevel {
                 case "authentication_failed":
                     enumSet.add(AUTHENTICATION_FAILED);
                     break;
+                case "realm_authentication_failed":
+                    enumSet.add(REALM_AUTHENTICATION_FAILED);
+                    break;
                 case "access_granted":
                     enumSet.add(ACCESS_GRANTED);
                     break;

@@ -68,9 +72,9 @@ public enum IndexAuditLevel {
         return enumSet;
     }

-    public static EnumSet<IndexAuditLevel> parse(List<String> includeLevels, List<String> excludeLevels) {
-        EnumSet<IndexAuditLevel> included = parse(includeLevels);
-        EnumSet<IndexAuditLevel> excluded = parse(excludeLevels);
+    public static EnumSet<AuditLevel> parse(List<String> includeLevels, List<String> excludeLevels) {
+        EnumSet<AuditLevel> included = parse(includeLevels);
+        EnumSet<AuditLevel> excluded = parse(excludeLevels);
         included.removeAll(excluded);
         return included;
     }

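The two-argument parse above composes the include and exclude lists by set difference: "_all" expands to every level before the exclusions are removed. A short usage sketch against the enum as added in this commit:

    import java.util.Arrays;
    import java.util.EnumSet;
    import org.elasticsearch.xpack.security.audit.AuditLevel;

    public class AuditLevelParseExample {
        public static void main(String[] args) {
            EnumSet<AuditLevel> events = AuditLevel.parse(
                    Arrays.asList("_all"),            // include everything...
                    Arrays.asList("access_granted")); // ...then drop access_granted
            System.out.println(events.contains(AuditLevel.ACCESS_GRANTED)); // false
            System.out.println(events.contains(AuditLevel.ACCESS_DENIED));  // true
        }
    }
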
@@ -51,6 +51,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportMessage;
 import org.elasticsearch.xpack.XPackPlugin;
 import org.elasticsearch.xpack.security.InternalClient;
+import org.elasticsearch.xpack.security.audit.AuditLevel;
 import org.elasticsearch.xpack.security.audit.AuditTrail;
 import org.elasticsearch.xpack.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.security.authz.privilege.SystemPrivilege;

@@ -85,19 +86,20 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Function;

 import static org.elasticsearch.xpack.security.Security.setting;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.REALM_AUTHENTICATION_FAILED;
 import static org.elasticsearch.xpack.security.audit.AuditUtil.indices;
 import static org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.ACCESS_DENIED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.ACCESS_GRANTED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.ANONYMOUS_ACCESS_DENIED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.AUTHENTICATION_FAILED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.CONNECTION_DENIED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.CONNECTION_GRANTED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.RUN_AS_DENIED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.RUN_AS_GRANTED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.SYSTEM_ACCESS_GRANTED;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.TAMPERED_REQUEST;
-import static org.elasticsearch.xpack.security.audit.index.IndexAuditLevel.parse;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ANONYMOUS_ACCESS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_FAILED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.SYSTEM_ACCESS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.TAMPERED_REQUEST;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.parse;
 import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.resolve;

 /**

@@ -105,27 +107,30 @@ import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.res
  */
 public class IndexAuditTrail extends AbstractComponent implements AuditTrail, ClusterStateListener {

-    public static final int DEFAULT_BULK_SIZE = 1000;
-    public static final int MAX_BULK_SIZE = 10000;
-    public static final int DEFAULT_MAX_QUEUE_SIZE = 1000;
-    public static final TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(1);
-    public static final IndexNameResolver.Rollover DEFAULT_ROLLOVER = IndexNameResolver.Rollover.DAILY;
-
     public static final String NAME = "index";
     public static final String INDEX_NAME_PREFIX = ".security_audit_log";
     public static final String DOC_TYPE = "event";
-    public static final Setting<IndexNameResolver.Rollover> ROLLOVER_SETTING =
+    public static final String INDEX_TEMPLATE_NAME = "security_audit_log";
+
+    private static final int DEFAULT_BULK_SIZE = 1000;
+    private static final int MAX_BULK_SIZE = 10000;
+    private static final int DEFAULT_MAX_QUEUE_SIZE = 1000;
+    private static final TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(1);
+    private static final IndexNameResolver.Rollover DEFAULT_ROLLOVER = IndexNameResolver.Rollover.DAILY;
+    private static final Setting<IndexNameResolver.Rollover> ROLLOVER_SETTING =
             new Setting<>(setting("audit.index.rollover"), (s) -> DEFAULT_ROLLOVER.name(),
                     s -> IndexNameResolver.Rollover.valueOf(s.toUpperCase(Locale.ENGLISH)), Property.NodeScope);
-    public static final Setting<Integer> QUEUE_SIZE_SETTING =
+    private static final Setting<Integer> QUEUE_SIZE_SETTING =
            Setting.intSetting(setting("audit.index.queue_max_size"), DEFAULT_MAX_QUEUE_SIZE, 1, Property.NodeScope);
-    public static final String INDEX_TEMPLATE_NAME = "security_audit_log";
-    public static final String DEFAULT_CLIENT_NAME = "security-audit-client";
+    private static final String DEFAULT_CLIENT_NAME = "security-audit-client";

-    static final List<String> DEFAULT_EVENT_INCLUDES = Arrays.asList(
+    private static final List<String> DEFAULT_EVENT_INCLUDES = Arrays.asList(
            ACCESS_DENIED.toString(),
            ACCESS_GRANTED.toString(),
            ANONYMOUS_ACCESS_DENIED.toString(),
            AUTHENTICATION_FAILED.toString(),
+           REALM_AUTHENTICATION_FAILED.toString(),
            CONNECTION_DENIED.toString(),
            CONNECTION_GRANTED.toString(),
            TAMPERED_REQUEST.toString(),

@@ -134,23 +139,24 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl
     );
     private static final String FORBIDDEN_INDEX_SETTING = "index.mapper.dynamic";

-    public static final Setting<Settings> INDEX_SETTINGS =
+    private static final Setting<Settings> INDEX_SETTINGS =
            Setting.groupSetting(setting("audit.index.settings.index."), Property.NodeScope);
-    public static final Setting<List<String>> INCLUDE_EVENT_SETTINGS =
+    private static final Setting<List<String>> INCLUDE_EVENT_SETTINGS =
            Setting.listSetting(setting("audit.index.events.include"), DEFAULT_EVENT_INCLUDES, Function.identity(),
                    Property.NodeScope);
-    public static final Setting<List<String>> EXCLUDE_EVENT_SETTINGS =
+    private static final Setting<List<String>> EXCLUDE_EVENT_SETTINGS =
            Setting.listSetting(setting("audit.index.events.exclude"), Collections.emptyList(),
                    Function.identity(), Property.NodeScope);
-    public static final Setting<Settings> REMOTE_CLIENT_SETTINGS =
+    private static final Setting<Boolean> INCLUDE_REQUEST_BODY =
+           Setting.boolSetting(setting("audit.index.events.emit_request_body"), false, Property.NodeScope);
+    private static final Setting<Settings> REMOTE_CLIENT_SETTINGS =
            Setting.groupSetting(setting("audit.index.client."), Property.NodeScope);
-    public static final Setting<Integer> BULK_SIZE_SETTING =
+    private static final Setting<Integer> BULK_SIZE_SETTING =
            Setting.intSetting(setting("audit.index.bulk_size"), DEFAULT_BULK_SIZE, 1, MAX_BULK_SIZE, Property.NodeScope);
-    public static final Setting<TimeValue> FLUSH_TIMEOUT_SETTING =
+    private static final Setting<TimeValue> FLUSH_TIMEOUT_SETTING =
            Setting.timeSetting(setting("audit.index.flush_interval"), DEFAULT_FLUSH_INTERVAL,
                    TimeValue.timeValueMillis(1L), Property.NodeScope);


     private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);
     private final String nodeName;
     private final Client client;

@@ -160,12 +166,13 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl
     private final Lock putMappingLock = new ReentrantLock();
     private final ClusterService clusterService;
     private final boolean indexToRemoteCluster;
+    private final EnumSet<AuditLevel> events;
+    private final IndexNameResolver.Rollover rollover;
+    private final boolean includeRequestBody;

     private BulkProcessor bulkProcessor;
-    private IndexNameResolver.Rollover rollover;
     private String nodeHostName;
     private String nodeHostAddress;
-    private EnumSet<IndexAuditLevel> events;

     @Override
     public String name() {

@@ -180,25 +187,10 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl
         this.queueConsumer = new QueueConsumer(EsExecutors.threadName(settings, "audit-queue-consumer"));
         int maxQueueSize = QUEUE_SIZE_SETTING.get(settings);
         this.eventQueue = createQueue(maxQueueSize);

-        // we have to initialize this here since we use rollover in determining if we can start...
-        rollover = ROLLOVER_SETTING.get(settings);
-
-        // we have to initialize the events here since we can receive events before starting...
-        List<String> includedEvents = INCLUDE_EVENT_SETTINGS.get(settings);
-        List<String> excludedEvents = EXCLUDE_EVENT_SETTINGS.get(settings);
-        try {
-            events = parse(includedEvents, excludedEvents);
-        } catch (IllegalArgumentException e) {
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
-                    "invalid event type specified, using default for audit index output. include events [{}], exclude events [{}]",
-                    includedEvents,
-                    excludedEvents),
-                e);
-            events = parse(DEFAULT_EVENT_INCLUDES, Collections.emptyList());
-        }
+        this.rollover = ROLLOVER_SETTING.get(settings);
+        this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings));
         this.indexToRemoteCluster = REMOTE_CLIENT_SETTINGS.get(settings).names().size() > 0;
+        this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings);

         if (indexToRemoteCluster == false) {
             // in the absence of client settings for remote indexing, fall back to the client that was passed in.

@@ -391,7 +383,7 @@

     @Override
     public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) {
-        if (events.contains(AUTHENTICATION_FAILED)) {
+        if (events.contains(REALM_AUTHENTICATION_FAILED)) {
             if (XPackUser.is(token.principal()) == false) {
                 try {
                     enqueue(message("authentication_failed", action, token, realm, indices(message), message), "authentication_failed");

@@ -404,7 +396,7 @@

     @Override
     public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) {
-        if (events.contains(AUTHENTICATION_FAILED)) {
+        if (events.contains(REALM_AUTHENTICATION_FAILED)) {
             if (XPackUser.is(token.principal()) == false) {
                 try {
                     enqueue(message("authentication_failed", null, token, realm, null, request), "authentication_failed");

@@ -610,7 +602,9 @@
         if (indices != null) {
             msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY));
         }
-        msg.builder.field(Field.REQUEST_BODY, restRequestContent(request));
+        if (includeRequestBody) {
+            msg.builder.field(Field.REQUEST_BODY, restRequestContent(request));
+        }
         msg.builder.field(Field.ORIGIN_TYPE, "rest");
         SocketAddress address = request.getRemoteAddress();
         if (address instanceof InetSocketAddress) {

@@ -630,7 +624,9 @@
         common("rest", type, msg.builder);

         msg.builder.field(Field.PRINCIPAL, user.principal());
-        msg.builder.field(Field.REQUEST_BODY, restRequestContent(request));
+        if (includeRequestBody) {
+            msg.builder.field(Field.REQUEST_BODY, restRequestContent(request));
+        }
         msg.builder.field(Field.ORIGIN_TYPE, "rest");
         SocketAddress address = request.getRemoteAddress();
         if (address instanceof InetSocketAddress) {

@@ -905,6 +901,7 @@
         settings.add(FLUSH_TIMEOUT_SETTING);
         settings.add(QUEUE_SIZE_SETTING);
         settings.add(REMOTE_CLIENT_SETTINGS);
+        settings.add(INCLUDE_REQUEST_BODY);
     }

     private class QueueConsumer extends Thread {

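Since the include/exclude lists and emit_request_body flag above are read once in the constructor, they are node-level settings. A hedged configuration sketch, assuming the setting(...) helper resolves keys under the "xpack.security." prefix (an assumption, not shown in this diff):

    import org.elasticsearch.common.settings.Settings;

    class IndexAuditSettingsExample {
        // Assumed keys: setting("audit.index...") resolves to "xpack.security.audit.index...".
        static Settings build() {
            return Settings.builder()
                    .put("xpack.security.audit.index.rollover", "DAILY")
                    .put("xpack.security.audit.index.events.emit_request_body", true)        // opt in to request bodies
                    .putArray("xpack.security.audit.index.events.exclude", "access_granted") // drop the noisiest event
                    .build();
        }
    }
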
@@ -20,6 +20,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportMessage;
+import org.elasticsearch.xpack.security.audit.AuditLevel;
 import org.elasticsearch.xpack.security.audit.AuditTrail;
 import org.elasticsearch.xpack.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.security.authz.privilege.SystemPrivilege;

@@ -32,11 +33,27 @@ import org.elasticsearch.xpack.security.user.XPackUser;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Set;
+import java.util.function.Function;

 import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString;
 import static org.elasticsearch.xpack.security.Security.setting;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.ANONYMOUS_ACCESS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_FAILED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.REALM_AUTHENTICATION_FAILED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_DENIED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.SYSTEM_ACCESS_GRANTED;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.TAMPERED_REQUEST;
+import static org.elasticsearch.xpack.security.audit.AuditLevel.parse;
 import static org.elasticsearch.xpack.security.audit.AuditUtil.indices;
 import static org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent;

@@ -52,10 +69,28 @@ public class LoggingAuditTrail extends AbstractComponent implements AuditTrail {
            Setting.boolSetting(setting("audit.logfile.prefix.emit_node_host_name"), false, Property.NodeScope);
     public static final Setting<Boolean> NODE_NAME_SETTING =
            Setting.boolSetting(setting("audit.logfile.prefix.emit_node_name"), true, Property.NodeScope);
+    private static final List<String> DEFAULT_EVENT_INCLUDES = Arrays.asList(
+           ACCESS_DENIED.toString(),
+           ACCESS_GRANTED.toString(),
+           ANONYMOUS_ACCESS_DENIED.toString(),
+           AUTHENTICATION_FAILED.toString(),
+           CONNECTION_DENIED.toString(),
+           TAMPERED_REQUEST.toString(),
+           RUN_AS_DENIED.toString(),
+           RUN_AS_GRANTED.toString()
+    );
+    private static final Setting<List<String>> INCLUDE_EVENT_SETTINGS =
+           Setting.listSetting(setting("audit.logfile.events.include"), DEFAULT_EVENT_INCLUDES, Function.identity(), Property.NodeScope);
+    private static final Setting<List<String>> EXCLUDE_EVENT_SETTINGS =
+           Setting.listSetting(setting("audit.logfile.events.exclude"), Collections.emptyList(), Function.identity(), Property.NodeScope);
+    private static final Setting<Boolean> INCLUDE_REQUEST_BODY =
+           Setting.boolSetting(setting("audit.logfile.events.emit_request_body"), false, Property.NodeScope);

     private final Logger logger;
     private final ClusterService clusterService;
     private final ThreadContext threadContext;
+    private final EnumSet<AuditLevel> events;
+    private final boolean includeRequestBody;

     private String prefix;

@@ -73,6 +108,8 @@ public class LoggingAuditTrail extends AbstractComponent implements AuditTrail {
         this.logger = logger;
         this.clusterService = clusterService;
         this.threadContext = threadContext;
+        this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings));
+        this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings);
     }

     private String getPrefix() {

@@ -84,300 +121,240 @@ public class LoggingAuditTrail extends AbstractComponent implements AuditTrail {

     @Override
     public void anonymousAccessDenied(String action, TransportMessage message) {
-        String indices = indicesString(message);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [anonymous_access_denied]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
+        if (events.contains(ANONYMOUS_ACCESS_DENIED)) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [anonymous_access_denied]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
                         message.getClass().getSimpleName());
             } else {
-                logger.warn("{}[transport] [anonymous_access_denied]\t{}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [anonymous_access_denied]\t{}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, message.getClass().getSimpleName());
-            } else {
-                logger.warn("{}[transport] [anonymous_access_denied]\t{}, action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action);
+                logger.info("{}[transport] [anonymous_access_denied]\t{}, action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action, message.getClass().getSimpleName());
             }
         }
     }

     @Override
     public void anonymousAccessDenied(RestRequest request) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[rest] [anonymous_access_denied]\t{}, uri=[{}], request_body=[{}]", getPrefix(),
-                    hostAttributes(request), request.uri(), restRequestContent(request));
-        } else {
-            logger.warn("{}[rest] [anonymous_access_denied]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+        if (events.contains(ANONYMOUS_ACCESS_DENIED)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [anonymous_access_denied]\t{}, uri=[{}], request_body=[{}]", getPrefix(),
+                        hostAttributes(request), request.uri(), restRequestContent(request));
+            } else {
+                logger.info("{}[rest] [anonymous_access_denied]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+            }
         }
     }

     @Override
     public void authenticationFailed(AuthenticationToken token, String action, TransportMessage message) {
-        String indices = indicesString(message);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], indices=[{}], request=[{}]",
+        if (events.contains(AUTHENTICATION_FAILED)) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], indices=[{}], request=[{}]",
                         getPrefix(), originAttributes(message, clusterService.localNode(), threadContext), token.principal(),
-                        action, indices, message.getClass().getSimpleName());
+                        action, indices, message.getClass().getSimpleName());
             } else {
-                logger.error("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), token.principal(), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), token.principal(), action,
+                logger.info("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), token.principal(), action,
                         message.getClass().getSimpleName());
-            } else {
-                logger.error("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), token.principal(), action);
             }

         }
     }

     @Override
     public void authenticationFailed(RestRequest request) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[rest] [authentication_failed]\t{}, uri=[{}], request_body=[{}]", getPrefix(), hostAttributes(request),
-                    request.uri(), restRequestContent(request));
-        } else {
-            logger.error("{}[rest] [authentication_failed]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+        if (events.contains(AUTHENTICATION_FAILED)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [authentication_failed]\t{}, uri=[{}], request_body=[{}]", getPrefix(), hostAttributes(request),
+                        request.uri(), restRequestContent(request));
+            } else {
+                logger.info("{}[rest] [authentication_failed]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+            }
         }
     }

     @Override
     public void authenticationFailed(String action, TransportMessage message) {
-        String indices = indicesString(message);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [authentication_failed]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
+        if (events.contains(AUTHENTICATION_FAILED)) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [authentication_failed]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
                         message.getClass().getSimpleName());
             } else {
-                logger.error("{}[transport] [authentication_failed]\t{}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [authentication_failed]\t{}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, message.getClass().getSimpleName());
-            } else {
-                logger.error("{}[transport] [authentication_failed]\t{}, action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action);
+                logger.info("{}[transport] [authentication_failed]\t{}, action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action, message.getClass().getSimpleName());
             }
         }
     }

     @Override
     public void authenticationFailed(AuthenticationToken token, RestRequest request) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}], request_body=[{}]", getPrefix(),
-                    hostAttributes(request), token.principal(), request.uri(), restRequestContent(request));
-        } else {
-            logger.error("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}]", getPrefix(), hostAttributes(request),
-                    token.principal(), request.uri());
+        if (events.contains(AUTHENTICATION_FAILED)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}], request_body=[{}]", getPrefix(),
+                        hostAttributes(request), token.principal(), request.uri(), restRequestContent(request));
+            } else {
+                logger.info("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}]", getPrefix(), hostAttributes(request),
+                        token.principal(), request.uri());
+            }
         }
     }

     @Override
     public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) {
-        if (logger.isTraceEnabled()) {
+        if (events.contains(REALM_AUTHENTICATION_FAILED)) {
             String indices = indicesString(message);
             if (indices != null) {
-                logger.trace("{}[transport] [authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], indices=[{}], " +
-                        "request=[{}]", getPrefix(), realm, originAttributes(message, clusterService.localNode(), threadContext),
-                        token.principal(), action, indices, message.getClass().getSimpleName());
+                logger.info("{}[transport] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], indices=[{}], " +
+                        "request=[{}]", getPrefix(), realm, originAttributes(message, clusterService.localNode(), threadContext),
+                        token.principal(), action, indices, message.getClass().getSimpleName());
             } else {
-                logger.trace("{}[transport] [authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], request=[{}]",
-                        getPrefix(), realm, originAttributes(message, clusterService.localNode(), threadContext), token.principal(),
-                        action, message.getClass().getSimpleName());
+                logger.info("{}[transport] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], request=[{}]",
+                        getPrefix(), realm, originAttributes(message, clusterService.localNode(), threadContext), token.principal(),
+                        action, message.getClass().getSimpleName());
             }
         }
     }

     @Override
     public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) {
-        if (logger.isTraceEnabled()) {
-            logger.trace("{}[rest] [authentication_failed]\trealm=[{}], {}, principal=[{}], uri=[{}], request_body=[{}]", getPrefix(),
-                    realm, hostAttributes(request), token.principal(), request.uri(), restRequestContent(request));
+        if (events.contains(REALM_AUTHENTICATION_FAILED)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], uri=[{}], request_body=[{}]",
+                        getPrefix(), realm, hostAttributes(request), token.principal(), request.uri(), restRequestContent(request));
+            } else {
+                logger.info("{}[rest] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], uri=[{}]", getPrefix(),
+                        realm, hostAttributes(request), token.principal(), request.uri());
+            }
         }
     }

     @Override
     public void accessGranted(User user, String action, TransportMessage message) {
-        String indices = indicesString(message);
-
-        // special treatment for internal system actions - only log on trace
-        if ((SystemUser.is(user) && SystemPrivilege.INSTANCE.predicate().test(action)) || XPackUser.is(user)) {
-            if (logger.isTraceEnabled()) {
-                if (indices != null) {
-                    logger.trace("{}[transport] [access_granted]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+        final boolean isSystem = (SystemUser.is(user) && SystemPrivilege.INSTANCE.predicate().test(action)) || XPackUser.is(user);
+        final boolean logSystemAccessGranted = isSystem && events.contains(SYSTEM_ACCESS_GRANTED);
+        final boolean shouldLog = logSystemAccessGranted || (isSystem == false && events.contains(ACCESS_GRANTED));
+        if (shouldLog) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [access_granted]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
                         originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices,
-                            message.getClass().getSimpleName());
-                } else {
-                    logger.trace("{}[transport] [access_granted]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
+                        message.getClass().getSimpleName());
+            } else {
+                logger.info("{}[transport] [access_granted]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
                         originAttributes(message, clusterService.localNode(), threadContext), principal(user), action,
                         message.getClass().getSimpleName());
             }
         }
-            return;
-        }
-
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [access_granted]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices,
-                        message.getClass().getSimpleName());
-            } else {
-                logger.info("{}[transport] [access_granted]\t{}, {}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [access_granted]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action,
-                        message.getClass().getSimpleName());
-            } else {
-                logger.info("{}[transport] [access_granted]\t{}, {}, action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action);
-            }
-        }
     }

     @Override
     public void accessDenied(User user, String action, TransportMessage message) {
-        String indices = indicesString(message);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [access_denied]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices,
+        if (events.contains(ACCESS_DENIED)) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [access_denied]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices,
                         message.getClass().getSimpleName());
             } else {
-                logger.error("{}[transport] [access_denied]\t{}, {}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [access_denied]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action,
+                logger.info("{}[transport] [access_denied]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action,
                         message.getClass().getSimpleName());
-            } else {
-                logger.error("{}[transport] [access_denied]\t{}, {}, action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), principal(user), action);
             }
         }
     }

     @Override
     public void tamperedRequest(RestRequest request) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[rest] [tampered_request]\t{}, uri=[{}], request_body=[{}]", getPrefix(), hostAttributes(request),
-                    request.uri(), restRequestContent(request));
-        } else {
-            logger.error("{}[rest] [tampered_request]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+        if (events.contains(TAMPERED_REQUEST)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [tampered_request]\t{}, uri=[{}], request_body=[{}]", getPrefix(), hostAttributes(request),
+                        request.uri(), restRequestContent(request));
+            } else {
+                logger.info("{}[rest] [tampered_request]\t{}, uri=[{}]", getPrefix(), hostAttributes(request), request.uri());
+            }
         }
     }

     @Override
     public void tamperedRequest(String action, TransportMessage message) {
-        String indices = indicesString(message);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [tampered_request]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
+        if (events.contains(TAMPERED_REQUEST)) {
+            String indices = indicesString(message);
+            if (indices != null) {
+                logger.info("{}[transport] [tampered_request]\t{}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action, indices,
                         message.getClass().getSimpleName());
             } else {
-                logger.error("{}[transport] [tampered_request]\t{}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [tampered_request]\t{}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action,
+                logger.info("{}[transport] [tampered_request]\t{}, action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(message, clusterService.localNode(), threadContext), action,
                         message.getClass().getSimpleName());
-            } else {
-                logger.error("{}[transport] [tampered_request]\t{}, action=[{}]", getPrefix(),
-                        originAttributes(message, clusterService.localNode(), threadContext), action);
             }
         }
     }

     @Override
     public void tamperedRequest(User user, String action, TransportMessage request) {
-        String indices = indicesString(request);
-        if (indices != null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [tampered_request]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
-                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action, indices,
+        if (events.contains(TAMPERED_REQUEST)) {
+            String indices = indicesString(request);
+            if (indices != null) {
+                logger.info("{}[transport] [tampered_request]\t{}, {}, action=[{}], indices=[{}], request=[{}]", getPrefix(),
+                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action, indices,
                         request.getClass().getSimpleName());
             } else {
-                logger.error("{}[transport] [tampered_request]\t{}, {}, action=[{}], indices=[{}]", getPrefix(),
-                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action, indices);
-            }
-        } else {
-            if (logger.isDebugEnabled()) {
-                logger.debug("{}[transport] [tampered_request]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
-                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action,
+                logger.info("{}[transport] [tampered_request]\t{}, {}, action=[{}], request=[{}]", getPrefix(),
+                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action,
                         request.getClass().getSimpleName());
-            } else {
-                logger.error("{}[transport] [tampered_request]\t{}, {}, action=[{}]", getPrefix(),
-                        originAttributes(request, clusterService.localNode(), threadContext), principal(user), action);
             }
         }
     }

     @Override
     public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) {
-        if (logger.isTraceEnabled()) {
-            logger.trace("{}[ip_filter] [connection_granted]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", getPrefix(),
+        if (events.contains(CONNECTION_GRANTED)) {
+            logger.info("{}[ip_filter] [connection_granted]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", getPrefix(),
                     NetworkAddress.format(inetAddress), profile, rule);
         }
     }

     @Override
     public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) {
-        logger.error("{}[ip_filter] [connection_denied]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", getPrefix(),
-                NetworkAddress.format(inetAddress), profile, rule);
+        if (events.contains(CONNECTION_DENIED)) {
+            logger.info("{}[ip_filter] [connection_denied]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", getPrefix(),
+                    NetworkAddress.format(inetAddress), profile, rule);
+        }
     }

     @Override
     public void runAsGranted(User user, String action, TransportMessage message) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[transport] [run_as_granted]\t{}, principal=[{}], run_as_principal=[{}], action=[{}], request=[{}]",
+        if (events.contains(RUN_AS_GRANTED)) {
+            logger.info("{}[transport] [run_as_granted]\t{}, principal=[{}], run_as_principal=[{}], action=[{}], request=[{}]",
                     getPrefix(), originAttributes(message, clusterService.localNode(), threadContext), user.principal(),
                     user.runAs().principal(), action, message.getClass().getSimpleName());
-        } else {
-            logger.info("{}[transport] [run_as_granted]\t{}, principal=[{}], run_as_principal=[{}], action=[{}]", getPrefix(),
-                    originAttributes(message, clusterService.localNode(), threadContext), user.principal(),
-                    user.runAs().principal(), action);
         }
     }

     @Override
     public void runAsDenied(User user, String action, TransportMessage message) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[transport] [run_as_denied]\t{}, principal=[{}], run_as_principal=[{}], action=[{}], request=[{}]",
+        if (events.contains(RUN_AS_DENIED)) {
+            logger.info("{}[transport] [run_as_denied]\t{}, principal=[{}], run_as_principal=[{}], action=[{}], request=[{}]",
                     getPrefix(), originAttributes(message, clusterService.localNode(), threadContext), user.principal(),
                     user.runAs().principal(), action, message.getClass().getSimpleName());
-        } else {
-            logger.info("{}[transport] [run_as_denied]\t{}, principal=[{}], run_as_principal=[{}], action=[{}]", getPrefix(),
-                    originAttributes(message, clusterService.localNode(), threadContext), user.principal(),
-                    user.runAs().principal(), action);
         }
     }

     @Override
     public void runAsDenied(User user, RestRequest request) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("{}[rest] [run_as_denied]\t{}, principal=[{}], uri=[{}], request_body=[{}]", getPrefix(),
+        if (events.contains(RUN_AS_DENIED)) {
+            if (includeRequestBody) {
+                logger.info("{}[rest] [run_as_denied]\t{}, principal=[{}], uri=[{}], request_body=[{}]", getPrefix(),
                     hostAttributes(request), user.principal(), request.uri(), restRequestContent(request));
-        } else {
-            logger.info("{}[transport] [run_as_denied]\t{}, principal=[{}], uri=[{}]", getPrefix(),
-                    hostAttributes(request), user.principal(), request.uri());
+            } else {
+                logger.info("{}[rest] [run_as_denied]\t{}, principal=[{}], uri=[{}]", getPrefix(),
+                    hostAttributes(request), user.principal(), request.uri());
+            }
         }
     }

@@ -465,5 +442,8 @@ public class LoggingAuditTrail extends AbstractComponent implements AuditTrail {
         settings.add(HOST_ADDRESS_SETTING);
         settings.add(HOST_NAME_SETTING);
         settings.add(NODE_NAME_SETTING);
+        settings.add(INCLUDE_EVENT_SETTINGS);
+        settings.add(EXCLUDE_EVENT_SETTINGS);
+        settings.add(INCLUDE_REQUEST_BODY);
     }
 }

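The logfile trail now filters on the parsed event set instead of the logger level, so its verbosity is controlled by node settings rather than log4j configuration. A comparable hedged sketch for the logfile output, under the same assumed "xpack.security." key prefix:

    import org.elasticsearch.common.settings.Settings;

    class LogfileAuditSettingsExample {
        // Assumed keys: setting("audit.logfile...") resolves to "xpack.security.audit.logfile...".
        static Settings build() {
            return Settings.builder()
                    .putArray("xpack.security.audit.logfile.events.include",
                            "authentication_failed", "realm_authentication_failed", "access_denied")
                    .put("xpack.security.audit.logfile.events.emit_request_body", true)
                    .build();
        }
    }
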
@@ -50,11 +50,13 @@ public class AuthenticationService extends AbstractComponent {
     private final AuthenticationFailureHandler failureHandler;
     private final ThreadContext threadContext;
     private final String nodeName;
+    private final AnonymousUser anonymousUser;
     private final boolean signUserHeader;
     private final boolean runAsEnabled;
+    private final boolean isAnonymousUserEnabled;

     public AuthenticationService(Settings settings, Realms realms, AuditTrailService auditTrail, CryptoService cryptoService,
-                                 AuthenticationFailureHandler failureHandler, ThreadPool threadPool) {
+                                 AuthenticationFailureHandler failureHandler, ThreadPool threadPool, AnonymousUser anonymousUser) {
         super(settings);
         this.nodeName = Node.NODE_NAME_SETTING.get(settings);
         this.realms = realms;

@@ -62,8 +64,10 @@ public class AuthenticationService extends AbstractComponent {
         this.cryptoService = cryptoService;
         this.failureHandler = failureHandler;
         this.threadContext = threadPool.getThreadContext();
+        this.anonymousUser = anonymousUser;
         this.signUserHeader = SIGN_USER_HEADER.get(settings);
         this.runAsEnabled = RUN_AS_ENABLED.get(settings);
+        this.isAnonymousUserEnabled = AnonymousUser.isAnonymousEnabled(settings);
     }

     /**

@@ -157,6 +161,7 @@ public class AuthenticationService extends AbstractComponent {
             throw handleNullUser(token);
         }
         user = lookupRunAsUserIfNecessary(user, token);
+        checkIfUserIsDisabled(user, token);

         final Authentication authentication = new Authentication(user, authenticatedBy, lookedupBy);
         authentication.writeToContext(threadContext, cryptoService, signUserHeader);

@@ -204,9 +209,9 @@ public class AuthenticationService extends AbstractComponent {
         if (fallbackUser != null) {
             RealmRef authenticatedBy = new RealmRef("__fallback", "__fallback", nodeName);
             authentication = new Authentication(fallbackUser, authenticatedBy, null);
-        } else if (AnonymousUser.enabled()) {
+        } else if (isAnonymousUserEnabled) {
             RealmRef authenticatedBy = new RealmRef("__anonymous", "__anonymous", nodeName);
-            authentication = new Authentication(AnonymousUser.INSTANCE, authenticatedBy, null);
+            authentication = new Authentication(anonymousUser, authenticatedBy, null);
         }

         if (authentication != null) {

@@ -297,6 +302,13 @@ public class AuthenticationService extends AbstractComponent {
         return user;
     }

+    void checkIfUserIsDisabled(User user, AuthenticationToken token) {
+        if (user.enabled() == false || (user.runAs() != null && user.runAs().enabled() == false)) {
+            logger.debug("user [{}] is disabled. failing authentication", user);
+            throw request.authenticationFailed(token);
+        }
+    }
+
     abstract class AuditableRequest {

         abstract void realmAuthenticationFailed(AuthenticationToken token, String realm);

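The diff above threads an AnonymousUser instance (and its enabled flag, captured once from settings) through the service instead of consulting static state. Condensed, the fallback order when no credentials resolve is: explicit fallback user first, then the anonymous user if enabled, else no authentication. A hypothetical restatement, for reading convenience only:

    // Illustrative only: the precedence implemented in the -204,9 hunk above.
    Authentication fallback(User fallbackUser) {
        if (fallbackUser != null) {
            return new Authentication(fallbackUser, new RealmRef("__fallback", "__fallback", nodeName), null);
        } else if (isAnonymousUserEnabled) {
            return new Authentication(anonymousUser, new RealmRef("__anonymous", "__anonymous", nodeName), null);
        }
        return null; // callers treat null as an authentication failure
    }
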
@@ -229,7 +229,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
         Path usersFile = FileUserPasswdStore.resolveFile(env);
         Path usersRolesFile = FileUserRolesStore.resolveFile(env);
         terminal.println("importing users from [" + usersFile + "]...");
-        Map<String, char[]> userToHashedPW = FileUserPasswdStore.parseFile(usersFile, null);
+        Map<String, char[]> userToHashedPW = FileUserPasswdStore.parseFile(usersFile, null, settings);
         Map<String, String[]> userToRoles = FileUserRolesStore.parseFile(usersRolesFile, null);
         Set<String> existingUsers;
         try {

@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.security.authc.esnative;

-import java.util.List;
-
 import org.elasticsearch.xpack.security.authc.RealmConfig;
 import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
 import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;

@@ -19,12 +17,11 @@ public class NativeRealm extends CachingUsernamePasswordRealm {

     public static final String TYPE = "native";

-    final NativeUsersStore userStore;
+    private final NativeUsersStore userStore;

     public NativeRealm(RealmConfig config, NativeUsersStore usersStore) {
         super(TYPE, config);
         this.userStore = usersStore;
-        usersStore.addListener(new Listener());
     }

     @Override

@@ -41,14 +38,4 @@ public class NativeRealm extends CachingUsernamePasswordRealm {
     protected User doAuthenticate(UsernamePasswordToken token) {
         return userStore.verifyPassword(token.principal(), token.credentials());
     }
-
-    class Listener implements NativeUsersStore.ChangeListener {
-
-        @Override
-        public void onUsersChanged(List<String> usernames) {
-            for (String username : usernames) {
-                expire(username);
-            }
-        }
-    }
 }

@@ -5,16 +5,13 @@
  */
 package org.elasticsearch.xpack.security.authc.esnative;
 
-import com.carrotsearch.hppc.ObjectHashSet;
-import com.carrotsearch.hppc.ObjectLongHashMap;
-import com.carrotsearch.hppc.ObjectLongMap;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.DocWriteResponse.Result;
+import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.delete.DeleteResponse;
@@ -28,7 +25,6 @@ import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateListener;
@@ -41,16 +37,12 @@ import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.engine.DocumentMissingException;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.threadpool.ThreadPool.Cancellable;
-import org.elasticsearch.threadpool.ThreadPool.Names;
 import org.elasticsearch.xpack.security.InternalClient;
 import org.elasticsearch.xpack.security.SecurityTemplateService;
 import org.elasticsearch.xpack.security.action.realm.ClearRealmCacheRequest;
@@ -64,14 +56,14 @@ import org.elasticsearch.xpack.security.client.SecurityClient;
 import org.elasticsearch.xpack.security.user.SystemUser;
 import org.elasticsearch.xpack.security.user.User;
 import org.elasticsearch.xpack.security.user.User.Fields;
 import org.elasticsearch.xpack.security.user.XPackUser;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Iterator;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
@@ -81,25 +73,20 @@ import static org.elasticsearch.xpack.security.Security.setting;
 import static org.elasticsearch.xpack.security.SecurityTemplateService.securityIndexMappingAndTemplateUpToDate;
 
 /**
- * ESNativeUsersStore is a {@code UserStore} that, instead of reading from a
- * file, reads from an Elasticsearch index instead. This {@code UserStore} in
- * particular implements both a User store and a UserRoles store, which means it
- * is responsible for fetching not only {@code User} objects, but also
- * retrieving the roles for a given username.
+ * NativeUsersStore is a store for users that reads from an Elasticsearch index. This store is responsible for fetching the full
+ * {@link User} object, which includes the names of the roles assigned to the user.
  * <p>
- * No caching is done by this class, it is handled at a higher level
+ * No caching is done by this class, it is handled at a higher level and no polling for changes is done by this class. Modification
+ * operations make a best effort attempt to clear the cache on all nodes for the user that was modified.
 */
 public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
 
-    public static final Setting<Integer> SCROLL_SIZE_SETTING =
+    private static final Setting<Integer> SCROLL_SIZE_SETTING =
             Setting.intSetting(setting("authc.native.scroll.size"), 1000, Property.NodeScope);
 
-    public static final Setting<TimeValue> SCROLL_KEEP_ALIVE_SETTING =
+    private static final Setting<TimeValue> SCROLL_KEEP_ALIVE_SETTING =
             Setting.timeSetting(setting("authc.native.scroll.keep_alive"), TimeValue.timeValueSeconds(10L), Property.NodeScope);
 
-    public static final Setting<TimeValue> POLL_INTERVAL_SETTING =
-            Setting.timeSetting(setting("authc.native.reload.interval"), TimeValue.timeValueSeconds(30L), Property.NodeScope);
-
     public enum State {
         INITIALIZED,
         STARTING,
@@ -109,25 +96,20 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         FAILED
     }
 
-    public static final String USER_DOC_TYPE = "user";
-    static final String RESERVED_USER_DOC_TYPE = "reserved-user";
+    private static final String USER_DOC_TYPE = "user";
+    private static final String RESERVED_USER_DOC_TYPE = "reserved-user";
 
     private final Hasher hasher = Hasher.BCRYPT;
-    private final List<ChangeListener> listeners = new CopyOnWriteArrayList<>();
     private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);
     private final InternalClient client;
-    private final ThreadPool threadPool;
 
-    private Cancellable pollerCancellable;
     private int scrollSize;
     private TimeValue scrollKeepAlive;
 
     private volatile boolean securityIndexExists = false;
 
-    public NativeUsersStore(Settings settings, InternalClient client, ThreadPool threadPool) {
+    public NativeUsersStore(Settings settings, InternalClient client) {
         super(settings);
         this.client = client;
-        this.threadPool = threadPool;
     }
 
     /**
@@ -249,6 +231,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         }
     }
 
+    /**
+     * Blocking method to get the user and their password hash
+     */
     private UserAndPassword getUserAndPassword(final String username) {
         final AtomicReference<UserAndPassword> userRef = new AtomicReference<>(null);
         final CountDownLatch latch = new CountDownLatch(1);
@@ -278,6 +263,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         return userRef.get();
     }
 
+    /**
+     * Async method to retrieve a user and their password
+     */
     private void getUserAndPassword(final String user, final ActionListener<UserAndPassword> listener) {
         try {
             GetRequest request = client.prepareGet(SecurityTemplateService.SECURITY_INDEX_NAME, USER_DOC_TYPE, user).request();
@@ -310,17 +298,16 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         }
     }
 
+    /**
+     * Async method to change the password of a native or reserved user. If a reserved user does not exist, the document will be created
+     * with a hash of the provided password.
+     */
     public void changePassword(final ChangePasswordRequest request, final ActionListener<Void> listener) {
         final String username = request.username();
-        if (SystemUser.NAME.equals(username)) {
-            ValidationException validationException = new ValidationException();
-            validationException.addValidationError("changing the password for [" + username + "] is not allowed");
-            listener.onFailure(validationException);
-            return;
-        }
+        assert SystemUser.NAME.equals(username) == false && XPackUser.NAME.equals(username) == false : username + "is internal!";
 
         final String docType;
-        if (ReservedRealm.isReserved(username)) {
+        if (ReservedRealm.isReserved(username, settings)) {
             docType = RESERVED_USER_DOC_TYPE;
         } else {
             docType = USER_DOC_TYPE;
@@ -338,33 +325,30 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
 
                     @Override
                     public void onFailure(Exception e) {
-                        Throwable cause = e;
-                        if (e instanceof ElasticsearchException) {
-                            cause = ExceptionsHelper.unwrapCause(e);
-                            if ((cause instanceof IndexNotFoundException) == false
-                                    && (cause instanceof DocumentMissingException) == false) {
-                                listener.onFailure(e);
-                                return;
+                        if (isIndexNotFoundOrDocumentMissing(e)) {
+                            if (docType.equals(RESERVED_USER_DOC_TYPE)) {
+                                createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener);
+                            } else {
+                                logger.debug((Supplier<?>) () ->
+                                        new ParameterizedMessage("failed to change password for user [{}]", request.username()), e);
+                                ValidationException validationException = new ValidationException();
+                                validationException.addValidationError("user must exist in order to change password");
+                                listener.onFailure(validationException);
                             }
-                        }
-
-                        if (docType.equals(RESERVED_USER_DOC_TYPE)) {
-                            createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener);
-                        } else {
-                            logger.debug(
-                                    (Supplier<?>) () -> new ParameterizedMessage(
-                                            "failed to change password for user [{}]", request.username()), cause);
-                            ValidationException validationException = new ValidationException();
-                            validationException.addValidationError("user must exist in order to change password");
-                            listener.onFailure(validationException);
+                        } else {
+                            listener.onFailure(e);
                         }
                     }
                 });
     }
 
     /**
      * Asynchronous method to create a reserved user with the given password hash. The cache for the user will be cleared after the document
      * has been indexed
     */
     private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener<Void> listener) {
         client.prepareIndex(SecurityTemplateService.SECURITY_INDEX_NAME, RESERVED_USER_DOC_TYPE, username)
-                .setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash))
+                .setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash), Fields.ENABLED.getPreferredName(), true)
                 .setRefreshPolicy(refresh)
                 .execute(new ActionListener<IndexResponse>() {
                     @Override
@@ -379,6 +363,12 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
                 });
     }
 
+    /**
+     * Asynchronous method to put a user. A put user request without a password hash is treated as an update and will fail with a
+     * {@link ValidationException} if the user does not exist. If a password hash is provided, then we issue a update request with an
+     * upsert document as well; the upsert document sets the enabled flag of the user to true but if the document already exists, this
+     * method will not modify the enabled value.
+     */
     public void putUser(final PutUserRequest request, final ActionListener<Boolean> listener) {
         if (state() != State.STARTED) {
             listener.onFailure(new IllegalStateException("user cannot be added as native user service has not been started"));
@@ -389,7 +379,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
             if (request.passwordHash() == null) {
                 updateUserWithoutPassword(request, listener);
             } else {
-                indexUser(request, listener);
+                upsertUser(request, listener);
             }
         } catch (Exception e) {
             logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to put user [{}]", request.username()), e);
@@ -397,6 +387,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         }
     }
 
+    /**
+     * Handles updating a user that should already exist where their password should not change
+     */
     private void updateUserWithoutPassword(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
         assert putUserRequest.passwordHash() == null;
         // We must have an existing document
@@ -416,52 +409,43 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
 
                 @Override
                 public void onFailure(Exception e) {
-                    Throwable cause = e;
-                    if (e instanceof ElasticsearchException) {
-                        cause = ExceptionsHelper.unwrapCause(e);
-                        if ((cause instanceof IndexNotFoundException) == false
-                                && (cause instanceof DocumentMissingException) == false) {
-                            listener.onFailure(e);
-                            return;
-                        }
+                    Exception failure = e;
+                    if (isIndexNotFoundOrDocumentMissing(e)) {
+                        // if the index doesn't exist we can never update a user
+                        // if the document doesn't exist, then this update is not valid
+                        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update user document with username [{}]",
+                                putUserRequest.username()), e);
+                        ValidationException validationException = new ValidationException();
+                        validationException.addValidationError("password must be specified unless you are updating an existing user");
+                        failure = validationException;
                     }
-
-                    // if the index doesn't exist we can never update a user
-                    // if the document doesn't exist, then this update is not valid
-                    logger.debug(
-                            (Supplier<?>) () -> new ParameterizedMessage(
-                                    "failed to update user document with username [{}]",
-                                    putUserRequest.username()),
-                            cause);
-                    ValidationException validationException = new ValidationException();
-                    validationException.addValidationError("password must be specified unless you are updating an existing user");
-                    listener.onFailure(validationException);
+                    listener.onFailure(failure);
                 }
             });
     }
 
-    private void indexUser(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
+    private void upsertUser(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
         assert putUserRequest.passwordHash() != null;
-        client.prepareIndex(SecurityTemplateService.SECURITY_INDEX_NAME,
+        client.prepareUpdate(SecurityTemplateService.SECURITY_INDEX_NAME,
                 USER_DOC_TYPE, putUserRequest.username())
-                .setSource(User.Fields.USERNAME.getPreferredName(), putUserRequest.username(),
+                .setDoc(User.Fields.USERNAME.getPreferredName(), putUserRequest.username(),
                         User.Fields.PASSWORD.getPreferredName(), String.valueOf(putUserRequest.passwordHash()),
                        User.Fields.ROLES.getPreferredName(), putUserRequest.roles(),
                         User.Fields.FULL_NAME.getPreferredName(), putUserRequest.fullName(),
                         User.Fields.EMAIL.getPreferredName(), putUserRequest.email(),
                         User.Fields.METADATA.getPreferredName(), putUserRequest.metadata())
+                .setUpsert(User.Fields.USERNAME.getPreferredName(), putUserRequest.username(),
+                        User.Fields.PASSWORD.getPreferredName(), String.valueOf(putUserRequest.passwordHash()),
+                        User.Fields.ROLES.getPreferredName(), putUserRequest.roles(),
+                        User.Fields.FULL_NAME.getPreferredName(), putUserRequest.fullName(),
+                        User.Fields.EMAIL.getPreferredName(), putUserRequest.email(),
+                        User.Fields.METADATA.getPreferredName(), putUserRequest.metadata(),
+                        User.Fields.ENABLED.getPreferredName(), true)
                .setRefreshPolicy(putUserRequest.getRefreshPolicy())
-                .execute(new ActionListener<IndexResponse>() {
+                .execute(new ActionListener<UpdateResponse>() {
                     @Override
-                    public void onResponse(IndexResponse indexResponse) {
-                        // if the document was just created, then we don't need to clear cache
-                        boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
-                        if (created) {
-                            listener.onResponse(true);
-                            return;
-                        }
-
-                        clearRealmCache(putUserRequest.username(), listener, created);
+                    public void onResponse(UpdateResponse updateResponse) {
+                        clearRealmCache(putUserRequest.username(), listener, updateResponse.getResult() == DocWriteResponse.Result.CREATED);
                     }
 
                     @Override
@@ -471,6 +455,82 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
                 });
     }
 
+    /**
+     * Asynchronous method that will update the enabled flag of a user. If the user is reserved and the document does not exist, a document
+     * will be created. If the user is not reserved, the user must exist otherwise the operation will fail.
+     */
+    public void setEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
+                           final ActionListener<Void> listener) {
+        if (state() != State.STARTED) {
+            listener.onFailure(new IllegalStateException("enabled status cannot be changed as native user service has not been started"));
+            return;
+        }
+
+        if (ReservedRealm.isReserved(username, settings)) {
+            setReservedUserEnabled(username, enabled, refreshPolicy, listener);
+        } else {
+            setRegularUserEnabled(username, enabled, refreshPolicy, listener);
+        }
+    }
+
+    private void setRegularUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
+                                       final ActionListener<Void> listener) {
+        try {
+            client.prepareUpdate(SecurityTemplateService.SECURITY_INDEX_NAME, USER_DOC_TYPE, username)
+                    .setDoc(User.Fields.ENABLED.getPreferredName(), enabled)
+                    .setRefreshPolicy(refreshPolicy)
+                    .execute(new ActionListener<UpdateResponse>() {
+                        @Override
+                        public void onResponse(UpdateResponse updateResponse) {
+                            assert updateResponse.getResult() == Result.UPDATED;
+                            clearRealmCache(username, listener, null);
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            Exception failure = e;
+                            if (isIndexNotFoundOrDocumentMissing(e)) {
+                                // if the index doesn't exist we can never update a user
+                                // if the document doesn't exist, then this update is not valid
+                                logger.debug((Supplier<?>) () ->
+                                        new ParameterizedMessage("failed to {} user [{}]", enabled ? "enable" : "disable", username), e);
+                                ValidationException validationException = new ValidationException();
+                                validationException.addValidationError("only existing users can be " + (enabled ? "enabled" : "disabled"));
+                                failure = validationException;
+                            }
+                            listener.onFailure(failure);
+                        }
+                    });
+        } catch (Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
+    private void setReservedUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
+                                        final ActionListener<Void> listener) {
+        try {
+            client.prepareUpdate(SecurityTemplateService.SECURITY_INDEX_NAME, RESERVED_USER_DOC_TYPE, username)
+                    .setDoc(User.Fields.ENABLED.getPreferredName(), enabled)
+                    .setUpsert(User.Fields.PASSWORD.getPreferredName(), String.valueOf(ReservedRealm.DEFAULT_PASSWORD_HASH),
+                            User.Fields.ENABLED.getPreferredName(), enabled)
+                    .setRefreshPolicy(refreshPolicy)
+                    .execute(new ActionListener<UpdateResponse>() {
+                        @Override
+                        public void onResponse(UpdateResponse updateResponse) {
+                            assert updateResponse.getResult() == Result.UPDATED || updateResponse.getResult() == Result.CREATED;
+                            clearRealmCache(username, listener, null);
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            listener.onFailure(e);
+                        }
+                    });
+        } catch (Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
     public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionListener<Boolean> listener) {
         if (state() != State.STARTED) {
             listener.onFailure(new IllegalStateException("user cannot be deleted as native user service has not been started"));
@@ -481,7 +541,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         DeleteRequest request = client.prepareDelete(SecurityTemplateService.SECURITY_INDEX_NAME,
                 USER_DOC_TYPE, deleteUserRequest.username()).request();
         request.indicesOptions().ignoreUnavailable();
-        request.setRefreshPolicy(deleteUserRequest.refresh() ? RefreshPolicy.IMMEDIATE : RefreshPolicy.WAIT_UNTIL);
+        request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy());
         client.delete(request, new ActionListener<DeleteResponse>() {
             @Override
             public void onResponse(DeleteResponse deleteResponse) {
@@ -537,15 +597,6 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         if (state.compareAndSet(State.INITIALIZED, State.STARTING)) {
             this.scrollSize = SCROLL_SIZE_SETTING.get(settings);
             this.scrollKeepAlive = SCROLL_KEEP_ALIVE_SETTING.get(settings);
-
-            UserStorePoller poller = new UserStorePoller();
-            try {
-                poller.doRun();
-            } catch (Exception e) {
-                logger.warn("failed to do initial poll of users", e);
-            }
-            TimeValue interval = settings.getAsTime("shield.authc.native.reload.interval", TimeValue.timeValueSeconds(30L));
-            pollerCancellable = threadPool.scheduleWithFixedDelay(poller, interval, Names.GENERIC);
             state.set(State.STARTED);
         }
     } catch (Exception e) {
@@ -556,14 +607,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
 
     public void stop() {
         if (state.compareAndSet(State.STARTED, State.STOPPING)) {
-            try {
-                pollerCancellable.cancel();
-            } catch (Exception e) {
-                state.set(State.FAILED);
-                throw e;
-            } finally {
-                state.set(State.STOPPED);
-            }
+            state.set(State.STOPPED);
         }
     }
 
@@ -574,7 +618,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
      * @param password the plaintext password to verify
     * @return {@link} User object if successful or {@code null} if verification fails
     */
-    public User verifyPassword(String username, final SecuredString password) {
+    User verifyPassword(String username, final SecuredString password) {
         if (state() != State.STARTED) {
             logger.trace("attempted to verify user credentials for [{}] but service was not started", username);
             return null;
@@ -590,11 +634,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         return null;
     }
 
-    public void addListener(ChangeListener listener) {
-        listeners.add(listener);
-    }
-
-    boolean started() {
+    public boolean started() {
         return state() == State.STARTED;
     }
 
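
The getReservedUserInfo method in the next hunks, like the other blocking getters in this class, bridges the async client callback to a synchronous return with a CountDownLatch and an AtomicReference. A self-contained sketch of that pattern, using only JDK types:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Consumer;

    final class BlockingBridge {
        // Run an async operation that will eventually invoke the supplied callback,
        // then block the caller until it does (or until the timeout trips).
        static <T> T awaitResult(Consumer<Consumer<T>> asyncOp) throws Exception {
            final AtomicReference<T> result = new AtomicReference<>();
            final CountDownLatch latch = new CountDownLatch(1);
            asyncOp.accept(value -> {
                result.set(value);
                latch.countDown();
            });
            if (latch.await(30, TimeUnit.SECONDS) == false) {
                throw new TimeoutException("timed out waiting for async result");
            }
            return result.get();
        }

        public static void main(String[] args) throws Exception {
            // usage: the "async" op here completes on another thread
            String value = awaitResult(callback -> new Thread(() -> callback.accept("done")).start());
            System.out.println(value);
        }
    }
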
@@ -602,9 +642,9 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         return securityIndexExists;
     }
 
-    char[] reservedUserPassword(String username) throws Exception {
+    ReservedUserInfo getReservedUserInfo(String username) throws Exception {
         assert started();
-        final AtomicReference<char[]> passwordHash = new AtomicReference<>();
+        final AtomicReference<ReservedUserInfo> userInfoRef = new AtomicReference<>();
         final AtomicReference<Exception> failure = new AtomicReference<>();
         final CountDownLatch latch = new CountDownLatch(1);
         client.prepareGet(SecurityTemplateService.SECURITY_INDEX_NAME, RESERVED_USER_DOC_TYPE, username)
@@ -614,26 +654,26 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
                     if (getResponse.isExists()) {
                         Map<String, Object> sourceMap = getResponse.getSourceAsMap();
                         String password = (String) sourceMap.get(User.Fields.PASSWORD.getPreferredName());
+                        Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName());
                         if (password == null || password.isEmpty()) {
                             failure.set(new IllegalStateException("password hash must not be empty!"));
-                            return;
+                        } else if (enabled == null) {
+                            failure.set(new IllegalStateException("enabled must not be null!"));
+                        } else {
+                            userInfoRef.set(new ReservedUserInfo(password.toCharArray(), enabled));
                         }
-                        passwordHash.set(password.toCharArray());
                     }
                 }
 
                 @Override
                 public void onFailure(Exception e) {
                     if (e instanceof IndexNotFoundException) {
-                        logger.trace(
-                                (Supplier<?>) () -> new ParameterizedMessage(
-                                        "could not retrieve built in user [{}] password since security index does not exist",
-                                        username),
-                                e);
+                        logger.trace((Supplier<?>) () -> new ParameterizedMessage(
+                                "could not retrieve built in user [{}] info since security index does not exist", username), e);
                     } else {
                         logger.error(
                                 (Supplier<?>) () -> new ParameterizedMessage(
-                                        "failed to retrieve built in user [{}] password", username), e);
+                                        "failed to retrieve built in user [{}] info", username), e);
                         failure.set(e);
                     }
                 }
@@ -653,7 +693,65 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
             // if there is any sort of failure we need to throw an exception to prevent the fallback to the default password...
             throw failureCause;
         }
-        return passwordHash.get();
+        return userInfoRef.get();
     }
 
+    Map<String, ReservedUserInfo> getAllReservedUserInfo() throws Exception {
+        assert started();
+        final Map<String, ReservedUserInfo> userInfos = new HashMap<>();
+        final AtomicReference<Exception> failure = new AtomicReference<>();
+        final CountDownLatch latch = new CountDownLatch(1);
+        client.prepareSearch(SecurityTemplateService.SECURITY_INDEX_NAME)
+                .setTypes(RESERVED_USER_DOC_TYPE)
+                .setQuery(QueryBuilders.matchAllQuery())
+                .setFetchSource(true)
+                .execute(new LatchedActionListener<>(new ActionListener<SearchResponse>() {
+                    @Override
+                    public void onResponse(SearchResponse searchResponse) {
+                        assert searchResponse.getHits().getTotalHits() <= 10 : "there are more than 10 reserved users we need to change " +
+                                "this to retrieve them all!";
+                        for (SearchHit searchHit : searchResponse.getHits().getHits()) {
+                            Map<String, Object> sourceMap = searchHit.getSource();
+                            String password = (String) sourceMap.get(User.Fields.PASSWORD.getPreferredName());
+                            Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName());
+                            if (password == null || password.isEmpty()) {
+                                failure.set(new IllegalStateException("password hash must not be empty!"));
+                                break;
+                            } else if (enabled == null) {
+                                failure.set(new IllegalStateException("enabled must not be null!"));
+                                break;
+                            } else {
+                                userInfos.put(searchHit.getId(), new ReservedUserInfo(password.toCharArray(), enabled));
+                            }
+                        }
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        if (e instanceof IndexNotFoundException) {
+                            logger.trace("could not retrieve built in users since security index does not exist", e);
+                        } else {
+                            logger.error("failed to retrieve built in users", e);
+                            failure.set(e);
+                        }
+                    }
+                }, latch));
+
+        try {
+            final boolean responseReceived = latch.await(30, TimeUnit.SECONDS);
+            if (responseReceived == false) {
+                failure.set(new TimeoutException("timed out trying to get built in users"));
+            }
+        } catch (InterruptedException e) {
+            failure.set(e);
+        }
+
+        Exception failureCause = failure.get();
+        if (failureCause != null) {
+            // if there is any sort of failure we need to throw an exception to prevent the fallback to the default password...
+            throw failureCause;
+        }
+        return userInfos;
+    }
+
     private void clearScrollResponse(String scrollId) {
@@ -716,7 +814,6 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
         if (state != State.STOPPED && state != State.FAILED) {
             throw new IllegalStateException("can only reset if stopped!!!");
         }
-        this.listeners.clear();
        this.securityIndexExists = false;
        this.state.set(State.INITIALIZED);
     }
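
The final hunk of this file adds isIndexNotFoundOrDocumentMissing, which centralizes the unwrap-and-test logic that the onFailure handlers above previously inlined. A rough standalone sketch of that shape; note the real helper unwraps once via ExceptionsHelper.unwrapCause, whereas this illustration walks the entire cause chain:

    final class CauseCheck {
        // Look through the cause chain for either of two "expected" failure types,
        // mirroring the unwrap followed by two instanceof tests in the diff.
        static boolean isExpectedFailure(Exception e, Class<?> first, Class<?> second) {
            Throwable cause = e;
            while (cause != null) {
                if (first.isInstance(cause) || second.isInstance(cause)) {
                    return true;
                }
                cause = cause.getCause();
            }
            return false;
        }

        public static void main(String[] args) {
            Exception wrapped = new RuntimeException(new IllegalStateException("document missing"));
            // prints true: the inner cause matches one of the expected types
            System.out.println(isExpectedFailure(wrapped, IllegalStateException.class, IllegalArgumentException.class));
        }
    }
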
@@ -731,158 +828,42 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateListener {
             String[] roles = ((List<String>) sourceMap.get(User.Fields.ROLES.getPreferredName())).toArray(Strings.EMPTY_ARRAY);
             String fullName = (String) sourceMap.get(User.Fields.FULL_NAME.getPreferredName());
             String email = (String) sourceMap.get(User.Fields.EMAIL.getPreferredName());
+            Boolean enabled = (Boolean) sourceMap.get(User.Fields.ENABLED.getPreferredName());
+            if (enabled == null) {
+                // fallback mechanism as a user from 2.x may not have the enabled field
+                enabled = Boolean.TRUE;
+            }
             Map<String, Object> metadata = (Map<String, Object>) sourceMap.get(User.Fields.METADATA.getPreferredName());
-            return new UserAndPassword(new User(username, roles, fullName, email, metadata), password.toCharArray());
+            return new UserAndPassword(new User(username, roles, fullName, email, metadata, enabled), password.toCharArray());
         } catch (Exception e) {
             logger.error((Supplier<?>) () -> new ParameterizedMessage("error in the format of data for user [{}]", username), e);
             return null;
         }
     }
 
-    private class UserStorePoller extends AbstractRunnable {
-
-        // this map contains the mapping for username -> version, which is used when polling the index to easily detect of
-        // any changes that may have been missed since the last update.
-        private final ObjectLongHashMap<String> userVersionMap = new ObjectLongHashMap<>();
-        private final ObjectLongHashMap<String> reservedUserVersionMap = new ObjectLongHashMap<>();
-
-        @Override
-        public void doRun() {
-            // hold a reference to the client since the poller may run after the class is stopped (we don't interrupt it running) and
-            // we reset when we test which sets the client to null...
-            final Client client = NativeUsersStore.this.client;
-            if (isStopped()) {
-                return;
-            }
-            if (securityIndexExists == false) {
-                logger.trace("cannot poll for user changes since security index [{}] does not exist", SecurityTemplateService
-                        .SECURITY_INDEX_NAME);
-                return;
-            }
-
-            logger.trace("starting polling of user index to check for changes");
-            List<String> changedUsers = scrollForModifiedUsers(client, USER_DOC_TYPE, userVersionMap);
-            if (isStopped()) {
-                return;
-            }
-
-            changedUsers.addAll(scrollForModifiedUsers(client, RESERVED_USER_DOC_TYPE, reservedUserVersionMap));
-            if (isStopped()) {
-                return;
-            }
-
-            notifyListeners(changedUsers);
-            logger.trace("finished polling of user index");
-        }
-
-        private List<String> scrollForModifiedUsers(Client client, String docType, ObjectLongMap<String> usersMap) {
-            // create a copy of all known users
-            ObjectHashSet<String> knownUsers = new ObjectHashSet<>(usersMap.keys());
-            List<String> changedUsers = new ArrayList<>();
-
-            SearchResponse response = null;
-            try {
-                client.admin().indices().prepareRefresh(SecurityTemplateService.SECURITY_INDEX_NAME).get();
-                response = client.prepareSearch(SecurityTemplateService.SECURITY_INDEX_NAME)
-                        .setScroll(scrollKeepAlive)
-                        .setQuery(QueryBuilders.typeQuery(docType))
-                        .setSize(scrollSize)
-                        .setVersion(true)
-                        .setFetchSource(false) // we only need id and version
-                        .get();
-
-                boolean keepScrolling = response.getHits().getHits().length > 0;
-                while (keepScrolling) {
-                    for (SearchHit hit : response.getHits().getHits()) {
-                        final String username = hit.id();
-                        final long version = hit.version();
-                        if (knownUsers.contains(username)) {
-                            final long lastKnownVersion = usersMap.get(username);
-                            if (version != lastKnownVersion) {
-                                // version is only changed by this method
-                                assert version > lastKnownVersion;
-                                usersMap.put(username, version);
-                                // there is a chance that the user's cache has already been cleared and we'll clear it again but
-                                // this should be ok in most cases as user changes should not be that frequent
-                                changedUsers.add(username);
-                            }
-                            knownUsers.remove(username);
-                        } else {
-                            usersMap.put(username, version);
-                        }
-                    }
-
-                    if (isStopped()) {
-                        // bail here
-                        return Collections.emptyList();
-                    }
-                    response = client.prepareSearchScroll(response.getScrollId()).setScroll(scrollKeepAlive).get();
-                    keepScrolling = response.getHits().getHits().length > 0;
-                }
-            } catch (IndexNotFoundException e) {
-                logger.trace("security index does not exist", e);
-            } finally {
-                if (response != null && response.getScrollId() != null) {
-                    ClearScrollRequest clearScrollRequest = client.prepareClearScroll().addScrollId(response.getScrollId()).request();
-                    client.clearScroll(clearScrollRequest).actionGet();
-                }
-            }
-
-            // we now have a list of users that were in our version map and have been deleted
-            Iterator<ObjectCursor<String>> userIter = knownUsers.iterator();
-            while (userIter.hasNext()) {
-                String user = userIter.next().value;
-                usersMap.remove(user);
-                changedUsers.add(user);
-            }
-
-            return changedUsers;
-        }
-
-        private void notifyListeners(List<String> changedUsers) {
-            if (changedUsers.isEmpty()) {
-                return;
-            }
-
-            // make the list unmodifiable to prevent modifications by any listeners
-            changedUsers = Collections.unmodifiableList(changedUsers);
-            if (logger.isDebugEnabled()) {
-                logger.debug("changes detected for users [{}]", changedUsers);
-            }
-
-            // call listeners
-            RuntimeException ex = null;
-            for (ChangeListener listener : listeners) {
-                try {
-                    listener.onUsersChanged(changedUsers);
-                } catch (Exception e) {
-                    if (ex == null) ex = new RuntimeException("exception while notifying listeners");
-                    ex.addSuppressed(e);
-                }
-            }
-
-            if (ex != null) throw ex;
-        }
-
-        @Override
-        public void onFailure(Exception e) {
-            logger.error("error occurred while checking the native users for changes", e);
-        }
-
-        private boolean isStopped() {
-            State state = state();
-            return state == State.STOPPED || state == State.STOPPING;
-        }
-    }
-
-    interface ChangeListener {
-
-        void onUsersChanged(List<String> username);
-    }
+    private static boolean isIndexNotFoundOrDocumentMissing(Exception e) {
+        if (e instanceof ElasticsearchException) {
+            Throwable cause = ExceptionsHelper.unwrapCause(e);
+            if (cause instanceof IndexNotFoundException || cause instanceof DocumentMissingException) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    static class ReservedUserInfo {
+
+        final char[] passwordHash;
+        final boolean enabled;
+
+        ReservedUserInfo(char[] passwordHash, boolean enabled) {
+            this.passwordHash = passwordHash;
+            this.enabled = enabled;
+        }
+    }
 
     public static void addSettings(List<Setting<?>> settings) {
         settings.add(SCROLL_SIZE_SETTING);
         settings.add(SCROLL_KEEP_ALIVE_SETTING);
-        settings.add(POLL_INTERVAL_SETTING);
     }
 }
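
The prepareIndex-to-prepareUpdate switch in upsertUser above is what preserves an existing user's enabled flag: when the document exists, only the setDoc fields are merged, and only when it does not exist is the setUpsert document (which carries enabled=true) indexed. A toy model of those semantics, not the Elasticsearch implementation:

    import java.util.HashMap;
    import java.util.Map;

    final class UpsertSemantics {
        // "doc" is merged into an existing document, while "upsert" is only
        // indexed when no document exists yet.
        static Map<String, Object> apply(Map<String, Object> existing, Map<String, Object> doc, Map<String, Object> upsert) {
            if (existing == null) {
                return new HashMap<>(upsert);   // first write: enabled=true comes from the upsert document
            }
            Map<String, Object> merged = new HashMap<>(existing);
            merged.putAll(doc);                 // later writes: "enabled" is absent from doc, so the stored value survives
            return merged;
        }

        public static void main(String[] args) {
            Map<String, Object> doc = new HashMap<>();
            doc.put("username", "jdoe");
            Map<String, Object> upsert = new HashMap<>(doc);
            upsert.put("enabled", true);

            Map<String, Object> first = apply(null, doc, upsert);      // {username=jdoe, enabled=true}
            first.put("enabled", false);                               // an admin disables the user
            Map<String, Object> second = apply(first, doc, upsert);    // enabled stays false
            System.out.println(second);
        }
    }
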
@@ -10,7 +10,7 @@ import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.xpack.security.authc.RealmConfig;
-import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ChangeListener;
+import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo;
 import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
 import org.elasticsearch.xpack.security.authc.support.Hasher;
 import org.elasticsearch.xpack.security.authc.support.SecuredString;
@@ -21,9 +21,12 @@ import org.elasticsearch.xpack.security.user.ElasticUser;
 import org.elasticsearch.xpack.security.user.KibanaUser;
 import org.elasticsearch.xpack.security.user.User;
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 /**
  * A realm for predefined users. These users can only be modified in terms of changing their passwords; no other modifications are allowed.
@@ -32,40 +35,35 @@ import java.util.List;
 public class ReservedRealm extends CachingUsernamePasswordRealm {
 
     public static final String TYPE = "reserved";
-    private static final char[] DEFAULT_PASSWORD_HASH = Hasher.BCRYPT.hash(new SecuredString("changeme".toCharArray()));
+    static final char[] DEFAULT_PASSWORD_HASH = Hasher.BCRYPT.hash(new SecuredString("changeme".toCharArray()));
+    private static final ReservedUserInfo DEFAULT_USER_INFO = new ReservedUserInfo(DEFAULT_PASSWORD_HASH, true);
 
     private final NativeUsersStore nativeUsersStore;
+    private final AnonymousUser anonymousUser;
+    private final boolean anonymousEnabled;
 
-    public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore) {
+    public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore, AnonymousUser anonymousUser) {
         super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env));
         this.nativeUsersStore = nativeUsersStore;
-        nativeUsersStore.addListener(new ChangeListener() {
-            @Override
-            public void onUsersChanged(List<String> changedUsers) {
-                changedUsers.stream()
-                        .filter(ReservedRealm::isReserved)
-                        .forEach(ReservedRealm.this::expire);
-            }
-        });
-
+        this.anonymousUser = anonymousUser;
+        this.anonymousEnabled = AnonymousUser.isAnonymousEnabled(settings);
     }
 
     @Override
     protected User doAuthenticate(UsernamePasswordToken token) {
-        final User user = getUser(token.principal());
-        if (user == null) {
+        if (isReserved(token.principal(), config.globalSettings()) == false) {
             return null;
         }
 
-        final char[] passwordHash = getPasswordHash(user.principal());
-        if (passwordHash != null) {
+        final ReservedUserInfo userInfo = getUserInfo(token.principal());
+        if (userInfo != null) {
             try {
-                if (Hasher.BCRYPT.verify(token.credentials(), passwordHash)) {
-                    return user;
+                if (Hasher.BCRYPT.verify(token.credentials(), userInfo.passwordHash)) {
+                    return getUser(token.principal(), userInfo);
                 }
             } finally {
-                if (passwordHash != DEFAULT_PASSWORD_HASH) {
-                    Arrays.fill(passwordHash, (char) 0);
+                if (userInfo.passwordHash != DEFAULT_PASSWORD_HASH) {
+                    Arrays.fill(userInfo.passwordHash, (char) 0);
                 }
             }
         }
@@ -75,7 +73,20 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
 
     @Override
     protected User doLookupUser(String username) {
-        return getUser(username);
+        if (isReserved(username, config.globalSettings()) == false) {
+            return null;
+        }
+
+        if (AnonymousUser.isAnonymousUsername(username, config.globalSettings())) {
+            return anonymousEnabled ? anonymousUser : null;
+        }
+
+        final ReservedUserInfo userInfo = getUserInfo(username);
+        if (userInfo != null) {
+            return getUser(username, userInfo);
+        }
+        // this was a reserved username - don't allow this to go to another realm...
+        throw Exceptions.authenticationError("failed to lookup user [{}]", username);
     }
 
     @Override
@@ -83,54 +94,71 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
         return true;
     }
 
-    public static boolean isReserved(String username) {
+    public static boolean isReserved(String username, Settings settings) {
         assert username != null;
         switch (username) {
             case ElasticUser.NAME:
             case KibanaUser.NAME:
                 return true;
             default:
-                return AnonymousUser.isAnonymousUsername(username);
+                return AnonymousUser.isAnonymousUsername(username, settings);
         }
     }
 
-    public static User getUser(String username) {
+    User getUser(String username, ReservedUserInfo userInfo) {
         assert username != null;
         switch (username) {
             case ElasticUser.NAME:
-                return ElasticUser.INSTANCE;
+                return new ElasticUser(userInfo.enabled);
             case KibanaUser.NAME:
-                return KibanaUser.INSTANCE;
+                return new KibanaUser(userInfo.enabled);
             default:
-                if (AnonymousUser.enabled() && AnonymousUser.isAnonymousUsername(username)) {
-                    return AnonymousUser.INSTANCE;
+                if (anonymousEnabled && anonymousUser.principal().equals(username)) {
+                    return anonymousUser;
                 }
                 return null;
         }
     }
 
-    public static Collection<User> users() {
-        if (AnonymousUser.enabled()) {
-            return Arrays.asList(ElasticUser.INSTANCE, KibanaUser.INSTANCE, AnonymousUser.INSTANCE);
+    public Collection<User> users() {
+        if (nativeUsersStore.started() == false) {
+            return anonymousEnabled ? Collections.singletonList(anonymousUser) : Collections.emptyList();
         }
-        return Arrays.asList(ElasticUser.INSTANCE, KibanaUser.INSTANCE);
+
+        List<User> users = new ArrayList<>(3);
+        try {
+            Map<String, ReservedUserInfo> reservedUserInfos = nativeUsersStore.getAllReservedUserInfo();
+            ReservedUserInfo userInfo = reservedUserInfos.get(ElasticUser.NAME);
+            users.add(new ElasticUser(userInfo == null || userInfo.enabled));
+            userInfo = reservedUserInfos.get(KibanaUser.NAME);
+            users.add(new KibanaUser(userInfo == null || userInfo.enabled));
+            if (anonymousEnabled) {
+                users.add(anonymousUser);
+            }
+        } catch (Exception e) {
+            logger.error("failed to retrieve reserved users", e);
+            return anonymousEnabled ? Collections.singletonList(anonymousUser) : Collections.emptyList();
+        }
+
+        return users;
     }
 
-    private char[] getPasswordHash(final String username) {
+    private ReservedUserInfo getUserInfo(final String username) {
         if (nativeUsersStore.started() == false) {
             // we need to be able to check for the user store being started...
             return null;
         }
 
         if (nativeUsersStore.securityIndexExists() == false) {
-            return DEFAULT_PASSWORD_HASH;
+            return DEFAULT_USER_INFO;
         }
 
         try {
-            char[] passwordHash = nativeUsersStore.reservedUserPassword(username);
-            if (passwordHash == null) {
-                return DEFAULT_PASSWORD_HASH;
+            ReservedUserInfo userInfo = nativeUsersStore.getReservedUserInfo(username);
+            if (userInfo == null) {
+                return DEFAULT_USER_INFO;
            }
-            return passwordHash;
+            return userInfo;
         } catch (Exception e) {
             logger.error(
                     (Supplier<?>) () -> new ParameterizedMessage("failed to retrieve password hash for reserved user [{}]", username), e);
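
getUserInfo above encodes the bootstrap behavior for reserved users: before the security index exists, every reserved user resolves to the default info (the "changeme" hash, enabled), and once a per-user document is written its stored hash and enabled flag take over. The decision ladder restated as a standalone sketch; ReservedUserInfo here is a local stand-in for the nested class added to NativeUsersStore, and the default holds a raw string where the real code holds a bcrypt hash:

    final class ReservedUserInfoLookup {
        static final class ReservedUserInfo {
            final char[] passwordHash;
            final boolean enabled;
            ReservedUserInfo(char[] passwordHash, boolean enabled) {
                this.passwordHash = passwordHash;
                this.enabled = enabled;
            }
        }

        // illustrative: the real DEFAULT_USER_INFO wraps a bcrypt hash of "changeme"
        static final ReservedUserInfo DEFAULT = new ReservedUserInfo("changeme".toCharArray(), true);

        // storeStarted / indexExists / stored mirror nativeUsersStore.started(),
        // securityIndexExists() and getReservedUserInfo(username) respectively.
        static ReservedUserInfo resolve(boolean storeStarted, boolean indexExists, ReservedUserInfo stored) {
            if (storeStarted == false) {
                return null;                               // cannot say anything yet; authentication fails closed
            }
            if (indexExists == false) {
                return DEFAULT;                            // bootstrap: default password, enabled
            }
            return stored != null ? stored : DEFAULT;      // the per-user document wins once it exists
        }
    }
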
@@ -10,6 +10,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.internal.Nullable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.watcher.FileChangesListener;
 import org.elasticsearch.watcher.FileWatcher;
@@ -43,7 +44,8 @@ public class FileUserPasswdStore {
     private final Logger logger;
 
     private final Path file;
-    final Hasher hasher = Hasher.BCRYPT;
+    private final Hasher hasher = Hasher.BCRYPT;
+    private final Settings settings;
 
     private volatile Map<String, char[]> users;
 
@@ -56,7 +58,8 @@ public class FileUserPasswdStore {
     FileUserPasswdStore(RealmConfig config, ResourceWatcherService watcherService, RefreshListener listener) {
         logger = config.logger(FileUserPasswdStore.class);
         file = resolveFile(config.env());
-        users = parseFileLenient(file, logger);
+        settings = config.globalSettings();
+        users = parseFileLenient(file, logger, settings);
         FileWatcher watcher = new FileWatcher(file.getParent());
         watcher.addListener(new FileListener());
         try {
@@ -80,9 +83,6 @@ public class FileUserPasswdStore {
     }
 
     public boolean verifyPassword(String username, SecuredString password) {
-        if (users == null) {
-            return false;
-        }
         char[] hash = users.get(username);
         return hash != null && hasher.verify(password, hash);
     }
@@ -99,9 +99,9 @@ public class FileUserPasswdStore {
      * Internally in this class, we try to load the file, but if for some reason we can't, we're being more lenient by
      * logging the error and skipping all users. This is aligned with how we handle other auto-loaded files in security.
     */
-    static Map<String, char[]> parseFileLenient(Path path, Logger logger) {
+    static Map<String, char[]> parseFileLenient(Path path, Logger logger, Settings settings) {
         try {
-            return parseFile(path, logger);
+            return parseFile(path, logger, settings);
         } catch (Exception e) {
             logger.error(
                     (Supplier<?>) () -> new ParameterizedMessage(
@@ -114,7 +114,7 @@ public class FileUserPasswdStore {
      * parses the users file. Should never return {@code null}, if the file doesn't exist an
     * empty map is returned
     */
-    public static Map<String, char[]> parseFile(Path path, @Nullable Logger logger) {
+    public static Map<String, char[]> parseFile(Path path, @Nullable Logger logger, Settings settings) {
         if (logger == null) {
             logger = NoOpLogger.INSTANCE;
         }
@@ -149,7 +149,7 @@ public class FileUserPasswdStore {
                 continue;
             }
             String username = line.substring(0, i);
-            Validation.Error validationError = Users.validateUsername(username);
+            Validation.Error validationError = Users.validateUsername(username, false, settings);
             if (validationError != null) {
                 logger.error("invalid username [{}] in users file [{}], skipping... ({})", username, path.toAbsolutePath(),
                         validationError);
@@ -194,7 +194,7 @@ public class FileUserPasswdStore {
         public void onFileChanged(Path file) {
             if (file.equals(FileUserPasswdStore.this.file)) {
                 logger.info("users file [{}] changed. updating users... )", file.toAbsolutePath());
-                users = parseFileLenient(file, logger);
+                users = parseFileLenient(file, logger, settings);
                 notifyRefresh();
             }
         }
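
Threading Settings through parseFile/parseFileLenient lets file-realm username validation consult the same anonymous-username configuration as the rest of security. A small usage sketch; the username:bcrypt-hash layout of the users file and the commented-out call are assumptions based on the signatures shown above:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Arrays;

    final class ParseUsersFile {
        public static void main(String[] args) throws Exception {
            Path users = Paths.get("users");
            Files.write(users, Arrays.asList(
                    "jdoe:$2a$10$abcdefghijklmnopqrstuv",     // bcrypt hash (truncated, illustrative)
                    "asmith:$2a$10$abcdefghijklmnopqrstuv"));
            // With x-pack on the classpath, the call would be:
            // Map<String, char[]> parsed = FileUserPasswdStore.parseFile(users, null, Settings.EMPTY);
            // parsed.keySet() -> [jdoe, asmith]
        }
    }
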
@@ -143,7 +143,7 @@ public class FileUserRolesStore {
                 continue;
             }
             String role = line.substring(0, i).trim();
-            Validation.Error validationError = Validation.Roles.validateRoleName(role);
+            Validation.Error validationError = Validation.Roles.validateRoleName(role, true);
             if (validationError != null) {
                 logger.error("invalid role entry in users_roles file [{}], line [{}] - {}. skipping...", path.toAbsolutePath(), lineNr,
                         validationError);
@@ -84,21 +84,21 @@ public class UsersTool extends MultiCommand {
 
         @Override
         protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
-            String username = parseUsername(arguments.values(options));
-            Validation.Error validationError = Users.validateUsername(username);
+            Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
+            String username = parseUsername(arguments.values(options), env.settings());
+            Validation.Error validationError = Users.validateUsername(username, false, Settings.EMPTY);
             if (validationError != null) {
                 throw new UserException(ExitCodes.DATA_ERROR, "Invalid username [" + username + "]... " + validationError);
             }
 
             char[] password = parsePassword(terminal, passwordOption.value(options));
-            Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
             String[] roles = parseRoles(terminal, env, rolesOption.value(options));
 
             Path passwordFile = FileUserPasswdStore.resolveFile(env);
             Path rolesFile = FileUserRolesStore.resolveFile(env);
             FileAttributesChecker attributesChecker = new FileAttributesChecker(passwordFile, rolesFile);
 
-            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(passwordFile, null));
+            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(passwordFile, null, env.settings()));
             if (users.containsKey(username)) {
                 throw new UserException(ExitCodes.CODE_ERROR, "User [" + username + "] already exists");
             }
@@ -138,13 +138,13 @@ public class UsersTool extends MultiCommand {
 
         @Override
         protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
-            String username = parseUsername(arguments.values(options));
             Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
+            String username = parseUsername(arguments.values(options), env.settings());
             Path passwordFile = FileUserPasswdStore.resolveFile(env);
             Path rolesFile = FileUserRolesStore.resolveFile(env);
             FileAttributesChecker attributesChecker = new FileAttributesChecker(passwordFile, rolesFile);
 
-            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(passwordFile, null));
+            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(passwordFile, null, env.settings()));
             if (users.containsKey(username) == false) {
                 throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist");
             }
@@ -193,13 +193,13 @@ public class UsersTool extends MultiCommand {
 
         @Override
         protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
-            String username = parseUsername(arguments.values(options));
+            Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
+            String username = parseUsername(arguments.values(options), env.settings());
             char[] password = parsePassword(terminal, passwordOption.value(options));
 
-            Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
             Path file = FileUserPasswdStore.resolveFile(env);
             FileAttributesChecker attributesChecker = new FileAttributesChecker(file);
-            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(file, null));
+            Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings()));
             if (users.containsKey(username) == false) {
                 throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist");
             }
@@ -237,8 +237,8 @@ public class UsersTool extends MultiCommand {
 
         @Override
         protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
-            String username = parseUsername(arguments.values(options));
+            Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
+            String username = parseUsername(arguments.values(options), env.settings());
             String[] addRoles = parseRoles(terminal, env, addOption.value(options));
             String[] removeRoles = parseRoles(terminal, env, removeOption.value(options));
 
@@ -254,7 +254,7 @@ public class UsersTool extends MultiCommand {
             Path rolesFile = FileUserRolesStore.resolveFile(env);
             FileAttributesChecker attributesChecker = new FileAttributesChecker(usersFile, rolesFile);
 
-            Map<String, char[]> usersMap = FileUserPasswdStore.parseFile(usersFile, null);
+            Map<String, char[]> usersMap = FileUserPasswdStore.parseFile(usersFile, null, env.settings());
             if (!usersMap.containsKey(username)) {
                 throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist");
             }
@@ -312,7 +312,7 @@ public class UsersTool extends MultiCommand {
             Map<String, String[]> userRoles = FileUserRolesStore.parseFile(userRolesFilePath, null);
 
             Path userFilePath = FileUserPasswdStore.resolveFile(env);
-            Set<String> users = FileUserPasswdStore.parseFile(userFilePath, null).keySet();
+            Set<String> users = FileUserPasswdStore.parseFile(userFilePath, null, env.settings()).keySet();
 
             Path rolesFilePath = FileRolesStore.resolveFile(env);
             Set<String> knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFilePath, null), ReservedRolesStore.names());
@@ -388,14 +388,14 @@ public class UsersTool extends MultiCommand {
     }
 
     // pkg private for testing
-    static String parseUsername(List<String> args) throws UserException {
+    static String parseUsername(List<String> args, Settings settings) throws UserException {
         if (args.isEmpty()) {
             throw new UserException(ExitCodes.USAGE, "Missing username argument");
         } else if (args.size() > 1) {
            throw new UserException(ExitCodes.USAGE, "Expected a single username argument, found extra: " + args.toString());
         }
         String username = args.get(0);
-        Validation.Error validationError = Users.validateUsername(username);
+        Validation.Error validationError = Users.validateUsername(username, false, settings);
         if (validationError != null) {
             throw new UserException(ExitCodes.DATA_ERROR, "Invalid username [" + username + "]... " + validationError);
         }
@@ -446,7 +446,7 @@ public class UsersTool extends MultiCommand {
         }
         String[] roles = rolesStr.split(",");
         for (String role : roles) {
-            Validation.Error validationError = Validation.Roles.validateRoleName(role);
+            Validation.Error validationError = Validation.Roles.validateRoleName(role, true);
             if (validationError != null) {
                 throw new UserException(ExitCodes.DATA_ERROR, "Invalid role [" + role + "]... " + validationError);
             }
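
parseUsername now takes Settings so that validateUsername can account for the configured anonymous username. The argument handling in isolation, with the x-pack validation step left as a comment:

    import java.util.Collections;
    import java.util.List;

    final class ParseUsernameSketch {
        // Mirrors UsersTool.parseUsername(List<String>, Settings): exactly one
        // argument is accepted, then the name is validated (elided here).
        static String parseUsername(List<String> args) {
            if (args.isEmpty()) {
                throw new IllegalArgumentException("Missing username argument");
            } else if (args.size() > 1) {
                throw new IllegalArgumentException("Expected a single username argument, found extra: " + args);
            }
            return args.get(0);
            // real code: Users.validateUsername(username, false, settings), throwing a UserException on error
        }

        public static void main(String[] args) {
            System.out.println(parseUsername(Collections.singletonList("jdoe")));
        }
    }
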
@@ -14,7 +14,6 @@ import org.elasticsearch.common.cache.CacheLoader;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.xpack.security.authc.AuthenticationToken;
 import org.elasticsearch.xpack.security.authc.RealmConfig;
-import org.elasticsearch.xpack.security.support.Exceptions;
 import org.elasticsearch.xpack.security.user.User;
 
 import java.util.Map;
@@ -149,11 +148,11 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm {
 
         CacheLoader<String, UserWithHash> callback = key -> {
             if (logger.isDebugEnabled()) {
-                logger.debug("user not found in cache, proceeding with normal lookup");
+                logger.debug("user [{}] not found in cache, proceeding with normal lookup", username);
             }
             User user = doLookupUser(username);
             if (user == null) {
-                throw Exceptions.authenticationError("could not lookup [{}]", username);
+                return null;
             }
             return new UserWithHash(user, null, null);
         };
@@ -162,10 +161,15 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm {
             UserWithHash userWithHash = cache.computeIfAbsent(username, callback);
             return userWithHash.user;
         } catch (ExecutionException ee) {
+            if (ee.getCause() instanceof ElasticsearchSecurityException) {
+                // this should bubble out
+                throw (ElasticsearchSecurityException) ee.getCause();
+            }
+
             if (logger.isTraceEnabled()) {
                 logger.trace((Supplier<?>) () -> new ParameterizedMessage("realm [{}] could not lookup [{}]", name(), username), ee);
             } else if (logger.isDebugEnabled()) {
-                logger.debug("realm [{}] could not authenticate [{}]", name(), username);
+                logger.debug("realm [{}] could not lookup [{}]", name(), username);
             }
             return null;
         }
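
The CacheLoader change above replaces "throw on unknown user" with "return null", so an unsuccessful lookup no longer surfaces as an ExecutionException, and only genuine ElasticsearchSecurityExceptions are rethrown. The resulting control flow, sketched with a plain map-backed cache (the real Cache.computeIfAbsent differs in detail):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    final class LookupCacheSketch {
        private final Map<String, Object> cache = new ConcurrentHashMap<>();

        // Look a user up, caching successes; a miss is reported as null rather
        // than as an exception, so callers can fall through to other realms.
        Object lookup(String username, Function<String, Object> doLookupUser) {
            Object cached = cache.get(username);
            if (cached != null) {
                return cached;
            }
            Object user = doLookupUser.apply(username);   // may be null: user unknown to this realm
            if (user != null) {
                cache.put(username, user);
            }
            return user;
        }
    }
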
@@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.SearchScrollAction;
import org.elasticsearch.action.support.replication.TransportReplicationAction.ConcreteShardRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasOrIndex;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -77,11 +78,13 @@ public class AuthorizationService extends AbstractComponent {
    private final IndicesAndAliasesResolver[] indicesAndAliasesResolvers;
    private final AuthenticationFailureHandler authcFailureHandler;
    private final ThreadContext threadContext;
    private final AnonymousUser anonymousUser;
    private final boolean isAnonymousEnabled;
    private final boolean anonymousAuthzExceptionEnabled;

    public AuthorizationService(Settings settings, CompositeRolesStore rolesStore, ClusterService clusterService,
                                AuditTrailService auditTrail, AuthenticationFailureHandler authcFailureHandler,
                                ThreadPool threadPool) {
                                ThreadPool threadPool, AnonymousUser anonymousUser) {
        super(settings);
        this.rolesStore = rolesStore;
        this.clusterService = clusterService;

@@ -91,6 +94,8 @@ public class AuthorizationService extends AbstractComponent {
        };
        this.authcFailureHandler = authcFailureHandler;
        this.threadContext = threadPool.getThreadContext();
        this.anonymousUser = anonymousUser;
        this.isAnonymousEnabled = AnonymousUser.isAnonymousEnabled(settings);
        this.anonymousAuthzExceptionEnabled = ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.get(settings);
    }

@@ -101,7 +106,7 @@ public class AuthorizationService extends AbstractComponent {
     * @param action The action
     */
    public List<String> authorizedIndicesAndAliases(User user, String action) {
        final String[] anonymousRoles = AnonymousUser.enabled() ? AnonymousUser.getRoles() : Strings.EMPTY_ARRAY;
        final String[] anonymousRoles = isAnonymousEnabled ? anonymousUser.roles() : Strings.EMPTY_ARRAY;
        String[] rolesNames = user.roles();
        if (rolesNames.length == 0 && anonymousRoles.length == 0) {
            return Collections.emptyList();

@@ -114,7 +119,7 @@ public class AuthorizationService extends AbstractComponent {
                predicates.add(role.indices().allowedIndicesMatcher(action));
            }
        }
        if (AnonymousUser.is(user) == false) {
        if (anonymousUser.equals(user) == false) {
            for (String roleName : anonymousRoles) {
                Role role = rolesStore.role(roleName);
                if (role != null) {

@@ -155,6 +160,10 @@ public class AuthorizationService extends AbstractComponent {
     * @throws ElasticsearchSecurityException If the given user is no allowed to execute the given request
     */
    public void authorize(Authentication authentication, String action, TransportRequest request) throws ElasticsearchSecurityException {
        final TransportRequest originalRequest = request;
        if (request instanceof ConcreteShardRequest) {
            request = ((ConcreteShardRequest<?>) request).getRequest();
        }
        // prior to doing any authorization lets set the originating action in the context only
        setOriginatingAction(action);

@@ -280,7 +289,7 @@ public class AuthorizationService extends AbstractComponent {
            }
        }

        grant(authentication, action, request);
        grant(authentication, action, originalRequest);
    }

    private void setIndicesAccessControl(IndicesAccessControl accessControl) {

@@ -360,7 +369,7 @@ public class AuthorizationService extends AbstractComponent {
    private ElasticsearchSecurityException denialException(Authentication authentication, String action) {
        final User user = authentication.getUser();
        // Special case for anonymous user
        if (AnonymousUser.enabled() && AnonymousUser.is(user)) {
        if (isAnonymousEnabled && anonymousUser.equals(user)) {
            if (anonymousAuthzExceptionEnabled == false) {
                throw authcFailureHandler.authenticationRequired(action, threadContext);
            }
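These AuthorizationService hunks replace the static AnonymousUser.enabled()/AnonymousUser.is(user) checks with an injected AnonymousUser instance compared via equals(). A rough sketch of that dependency-injection shape, with stand-in classes rather than the real x-pack types (the sketch also simplifies where the enabled flag comes from):

// Stand-ins only; the real types live in org.elasticsearch.xpack.security.
final class AnonymousUserSketch {
    private final String[] roles;
    private final boolean enabled;

    AnonymousUserSketch(String[] roles, boolean enabled) {
        this.roles = roles;
        this.enabled = enabled;
    }

    String[] roles() { return roles; }

    boolean enabled() { return enabled; }
}

final class AuthorizationServiceSketch {
    private final AnonymousUserSketch anonymousUser;
    private final boolean isAnonymousEnabled;

    // The anonymous user arrives through the constructor, so a test can build
    // the service with any anonymous configuration instead of mutating the
    // global static state the old AnonymousUser.enabled()/is() calls read.
    AuthorizationServiceSketch(AnonymousUserSketch anonymousUser) {
        this.anonymousUser = anonymousUser;
        this.isAnonymousEnabled = anonymousUser.enabled();
    }

    String[] anonymousRoles() {
        return isAnonymousEnabled ? anonymousUser.roles() : new String[0];
    }
}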
@@ -168,7 +168,7 @@ public class RoleDescriptor implements ToXContent {

    public static RoleDescriptor parse(String name, XContentParser parser) throws IOException {
        // validate name
        Validation.Error validationError = Validation.Roles.validateRoleName(name);
        Validation.Error validationError = Validation.Roles.validateRoleName(name, true);
        if (validationError != null) {
            ValidationException ve = new ValidationException();
            ve.addValidationError(validationError.toString());
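The extra boolean passed to validateRoleName suggests the parser now accepts names that stricter call sites reject, presumably reserved role names read back from existing documents. A hypothetical sketch of such a two-argument validator; the reserved names and the flag's exact meaning are assumptions, not taken from this diff:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

final class RoleNameValidationSketch {
    // Hypothetical reserved names; the real list lives elsewhere in x-pack.
    private static final Set<String> RESERVED =
            Collections.unmodifiableSet(new HashSet<>(Arrays.asList("superuser", "kibana")));

    // Returns null when the name is valid, mirroring the Validation.Error
    // contract visible in the hunk above.
    static String validateRoleName(String name, boolean allowReserved) {
        if (name == null || name.isEmpty()) {
            return "role name must not be empty";
        }
        if (allowReserved == false && RESERVED.contains(name)) {
            return "role name [" + name + "] is reserved";
        }
        return null;
    }
}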
@@ -24,7 +24,6 @@ import org.elasticsearch.action.search.MultiSearchResponse.Item;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;

@@ -74,7 +73,7 @@ import static org.elasticsearch.xpack.security.Security.setting;
import static org.elasticsearch.xpack.security.SecurityTemplateService.securityIndexMappingAndTemplateUpToDate;

/**
 * ESNativeRolesStore is a {@code RolesStore} that, instead of reading from a
 * NativeRolesStore is a {@code RolesStore} that, instead of reading from a
 * file, reads from an Elasticsearch index instead. Unlike the file-based roles
 * store, ESNativeRolesStore can be used to add a role to the store by inserting
 * the document into the administrative index.

@@ -264,7 +263,7 @@ public class NativeRolesStore extends AbstractComponent implements RolesStore, C
        try {
            DeleteRequest request = client.prepareDelete(SecurityTemplateService.SECURITY_INDEX_NAME,
                    ROLE_DOC_TYPE, deleteRoleRequest.name()).request();
            request.setRefreshPolicy(deleteRoleRequest.refresh() ? RefreshPolicy.IMMEDIATE : RefreshPolicy.WAIT_UNTIL);
            request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
            client.delete(request, new ActionListener<DeleteResponse>() {
                @Override
                public void onResponse(DeleteResponse deleteResponse) {
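In the NativeRolesStore hunk, the boolean-to-enum translation at the call site (refresh ? IMMEDIATE : WAIT_UNTIL) disappears because the request now carries a WriteRequest.RefreshPolicy end to end. A compact sketch of the difference, with a stand-in enum rather than the real Elasticsearch type:

// Stand-in for org.elasticsearch.action.support.WriteRequest.RefreshPolicy.
enum RefreshPolicySketch { NONE, IMMEDIATE, WAIT_UNTIL }

final class DeleteRoleRequestSketch {
    private RefreshPolicySketch refreshPolicy = RefreshPolicySketch.NONE;

    // Old call site: request.setRefreshPolicy(refresh ? IMMEDIATE : WAIT_UNTIL);
    // New call site: request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
    // The policy is chosen once at the REST layer and forwarded unchanged.
    RefreshPolicySketch getRefreshPolicy() {
        return refreshPolicy;
    }

    void setRefreshPolicy(RefreshPolicySketch policy) {
        this.refreshPolicy = policy;
    }
}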
@@ -24,12 +24,14 @@ import org.elasticsearch.xpack.security.authz.permission.SuperuserRole;
import org.elasticsearch.xpack.security.authz.permission.TransportClientRole;
import org.elasticsearch.xpack.security.user.KibanaUser;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;

/**
 *
 */
public class ReservedRolesStore implements RolesStore {

    private static final User DEFAULT_ENABLED_KIBANA_USER = new KibanaUser(true);
    private final SecurityContext securityContext;

    public ReservedRolesStore(SecurityContext securityContext) {

@@ -54,8 +56,9 @@ public class ReservedRolesStore implements RolesStore {
            case KibanaRole.NAME:
                // The only user that should know about this role is the kibana user itself (who has this role). The reason we want to hide
                // this role is that it was created specifically for kibana, with all the permissions that the kibana user needs.
                // We don't want it to be assigned to other users.
                if (KibanaUser.is(securityContext.getUser())) {
                // We don't want it to be assigned to other users. The Kibana user here must always be enabled if it is in the
                // security context
                if (DEFAULT_ENABLED_KIBANA_USER.equals(securityContext.getUser())) {
                    return KibanaRole.INSTANCE;
                }
                return null;

@@ -87,7 +90,7 @@ public class ReservedRolesStore implements RolesStore {
                // The only user that should know about this role is the kibana user itself (who has this role). The reason we want to hide
                // this role is that it was created specifically for kibana, with all the permissions that the kibana user needs.
                // We don't want it to be assigned to other users.
                if (KibanaUser.is(securityContext.getUser())) {
                if (DEFAULT_ENABLED_KIBANA_USER.equals(securityContext.getUser())) {
                    return KibanaRole.DESCRIPTOR;
                }
                return null;

@@ -97,7 +100,7 @@ public class ReservedRolesStore implements RolesStore {
    }

    public Collection<RoleDescriptor> roleDescriptors() {
        if (KibanaUser.is(securityContext.getUser())) {
        if (DEFAULT_ENABLED_KIBANA_USER.equals(securityContext.getUser())) {
            return Arrays.asList(SuperuserRole.DESCRIPTOR, TransportClientRole.DESCRIPTOR, KibanaUserRole.DESCRIPTOR,
                    KibanaRole.DESCRIPTOR, MonitoringUserRole.DESCRIPTOR, RemoteMonitoringAgentRole.DESCRIPTOR,
                    IngestAdminRole.DESCRIPTOR);
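The ReservedRolesStore hunks swap a principal-name check (KibanaUser.is(...)) for equality against a canonical enabled KibanaUser, so a disabled kibana user no longer resolves the kibana role. A small sketch of why an enabled-aware equals() changes the outcome, using stand-in classes with assumed semantics:

// Stand-in for the x-pack User/KibanaUser types; equality covers the enabled
// flag as well as the principal, which is the point of the change above.
final class UserSketch {
    final String principal;
    final boolean enabled;

    UserSketch(String principal, boolean enabled) {
        this.principal = principal;
        this.enabled = enabled;
    }

    @Override
    public boolean equals(Object o) {
        if ((o instanceof UserSketch) == false) {
            return false;
        }
        UserSketch other = (UserSketch) o;
        return principal.equals(other.principal) && enabled == other.enabled;
    }

    @Override
    public int hashCode() {
        return 31 * principal.hashCode() + Boolean.hashCode(enabled);
    }

    public static void main(String[] args) {
        UserSketch template = new UserSketch("kibana", true);
        System.out.println(template.equals(new UserSketch("kibana", true)));  // true -> role resolved
        System.out.println(template.equals(new UserSketch("kibana", false))); // false -> role hidden
    }
}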
@@ -45,6 +45,10 @@ import org.elasticsearch.xpack.security.action.user.PutUserAction;
import org.elasticsearch.xpack.security.action.user.PutUserRequest;
import org.elasticsearch.xpack.security.action.user.PutUserRequestBuilder;
import org.elasticsearch.xpack.security.action.user.PutUserResponse;
import org.elasticsearch.xpack.security.action.user.SetEnabledAction;
import org.elasticsearch.xpack.security.action.user.SetEnabledRequest;
import org.elasticsearch.xpack.security.action.user.SetEnabledRequestBuilder;
import org.elasticsearch.xpack.security.action.user.SetEnabledResponse;

import java.io.IOException;

@@ -163,6 +167,14 @@ public class SecurityClient {
        client.execute(ChangePasswordAction.INSTANCE, request, listener);
    }

    public SetEnabledRequestBuilder prepareSetEnabled(String username, boolean enabled) {
        return new SetEnabledRequestBuilder(client).username(username).enabled(enabled);
    }

    public void setEnabled(SetEnabledRequest request, ActionListener<SetEnabledResponse> listener) {
        client.execute(SetEnabledAction.INSTANCE, request, listener);
    }

    /** Role Management */

    public GetRolesRequestBuilder prepareGetRoles(String... names) {
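The new prepareSetEnabled/setEnabled pair follows the same fluent request-builder convention as the surrounding client methods. A minimal sketch of that convention, with illustrative types rather than the real builders:

final class SetEnabledRequestSketch {
    String username;
    boolean enabled;
}

final class SetEnabledRequestBuilderSketch {
    private final SetEnabledRequestSketch request = new SetEnabledRequestSketch();

    // Each setter mutates the wrapped request and returns the builder, which
    // is what makes prepareSetEnabled("jacknich", false)-style chaining work.
    SetEnabledRequestBuilderSketch username(String username) {
        request.username = username;
        return this;
    }

    SetEnabledRequestBuilderSketch enabled(boolean enabled) {
        request.enabled = enabled;
        return this;
    }

    SetEnabledRequestSketch request() {
        return request;
    }
}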
@@ -17,7 +17,6 @@ import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.xpack.security.action.role.DeleteRoleRequestBuilder;
import org.elasticsearch.xpack.security.action.role.DeleteRoleResponse;
import org.elasticsearch.xpack.security.client.SecurityClient;

@@ -42,18 +41,16 @@ public class RestDeleteRoleAction extends BaseRestHandler {

    @Override
    public void handleRequest(RestRequest request, final RestChannel channel, NodeClient client) throws Exception {
        DeleteRoleRequestBuilder requestBuilder = new SecurityClient(client).prepareDeleteRole(request.param("name"));
        if (request.hasParam("refresh")) {
            requestBuilder.refresh(request.paramAsBoolean("refresh", true));
        }
        requestBuilder.execute(new RestBuilderListener<DeleteRoleResponse>(channel) {
            @Override
            public RestResponse buildResponse(DeleteRoleResponse response, XContentBuilder builder) throws Exception {
                return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND,
                        builder.startObject()
                                .field("found", response.found())
                                .endObject());
            }
        });
        new SecurityClient(client).prepareDeleteRole(request.param("name"))
                .setRefreshPolicy(request.param("refresh"))
                .execute(new RestBuilderListener<DeleteRoleResponse>(channel) {
                    @Override
                    public RestResponse buildResponse(DeleteRoleResponse response, XContentBuilder builder) throws Exception {
                        return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND,
                                builder.startObject()
                                        .field("found", response.found())
                                        .endObject());
                    }
                });
    }
}
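setRefreshPolicy(request.param("refresh")) hands the raw query-string value straight to the builder. As far as I recall, WriteRequest.RefreshPolicy.parse maps "true" (or an empty value) to IMMEDIATE and "wait_for" to WAIT_UNTIL; the sketch below reproduces that mapping from memory, so treat the exact accepted values as an assumption:

final class RefreshParamSketch {
    enum Policy { NONE, IMMEDIATE, WAIT_UNTIL }

    // Assumed mapping, reproduced from memory of RefreshPolicy.parse; the
    // real method rejects unrecognised values rather than defaulting.
    static Policy parse(String value) {
        if (value == null || "false".equals(value)) {
            return Policy.NONE; // the builder keeps its default when the param is absent
        }
        if ("wait_for".equals(value)) {
            return Policy.WAIT_UNTIL;
        }
        if ("true".equals(value) || value.isEmpty()) {
            return Policy.IMMEDIATE; // a bare ?refresh arrives as the empty string
        }
        throw new IllegalArgumentException("unknown refresh value [" + value + "]");
    }
}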
@@ -46,7 +46,7 @@ public class RestChangePasswordAction extends BaseRestHandler {
        final User user = securityContext.getUser();
        String username = request.param("username");
        if (username == null) {
            username = user.runAs() == null ? user.principal() : user.runAs().principal();;
            username = user.runAs() == null ? user.principal() : user.runAs().principal();
        }

        new SecurityClient(client).prepareChangePassword(username, request.content())
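Beyond the dropped stray semicolon, the line encodes the run-as rule: with no explicit username parameter, the password change targets the run-as user when one is present, otherwise the authenticated principal. A standalone sketch of that resolution, with a stand-in User type:

final class RunAsSketch {
    static final class User {
        final String principal;
        final User runAs; // non-null when the request executes as another user

        User(String principal, User runAs) {
            this.principal = principal;
            this.runAs = runAs;
        }
    }

    static String effectiveUsername(User user, String requestedUsername) {
        if (requestedUsername != null) {
            return requestedUsername;
        }
        return user.runAs == null ? user.principal : user.runAs.principal;
    }

    public static void main(String[] args) {
        User admin = new User("admin", new User("jacknich", null));
        System.out.println(effectiveUsername(admin, null));    // jacknich
        System.out.println(effectiveUsername(admin, "other")); // other
    }
}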
@@ -17,7 +17,6 @@ import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.xpack.security.action.user.DeleteUserRequestBuilder;
import org.elasticsearch.xpack.security.action.user.DeleteUserResponse;
import org.elasticsearch.xpack.security.client.SecurityClient;

@@ -42,20 +41,16 @@ public class RestDeleteUserAction extends BaseRestHandler {

    @Override
    public void handleRequest(RestRequest request, final RestChannel channel, NodeClient client) throws Exception {
        String username = request.param("username");

        DeleteUserRequestBuilder requestBuilder = new SecurityClient(client).prepareDeleteUser(username);
        if (request.hasParam("refresh")) {
            requestBuilder.refresh(request.paramAsBoolean("refresh", true));
        }
        requestBuilder.execute(new RestBuilderListener<DeleteUserResponse>(channel) {
            @Override
            public RestResponse buildResponse(DeleteUserResponse response, XContentBuilder builder) throws Exception {
                return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND,
                        builder.startObject()
                                .field("found", response.found())
                                .endObject());
            }
        });
        new SecurityClient(client).prepareDeleteUser(request.param("username"))
                .setRefreshPolicy(request.param("refresh"))
                .execute(new RestBuilderListener<DeleteUserResponse>(channel) {
                    @Override
                    public RestResponse buildResponse(DeleteUserResponse response, XContentBuilder builder) throws Exception {
                        return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND,
                                builder.startObject()
                                        .field("found", response.found())
                                        .endObject());
                    }
                });
    }
}
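Both delete endpoints keep the same response contract while switching to the fluent form: HTTP 200 with "found": true when the role or user existed, 404 with "found": false otherwise. A tiny sketch of that mapping, using plain strings instead of XContentBuilder:

final class DeleteResponseSketch {
    // Mirrors the buildResponse logic above: the status tracks existence,
    // and the body always reports the same flag.
    static String render(boolean found) {
        int status = found ? 200 : 404;
        return status + " {\"found\":" + found + "}";
    }

    public static void main(String[] args) {
        System.out.println(render(true));  // 200 {"found":true}
        System.out.println(render(false)); // 404 {"found":false}
    }
}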
@@ -49,9 +49,7 @@ public class RestPutUserAction extends BaseRestHandler {
    @Override
    public void handleRequest(RestRequest request, final RestChannel channel, NodeClient client) throws Exception {
        PutUserRequestBuilder requestBuilder = new SecurityClient(client).preparePutUser(request.param("username"), request.content());
        if (request.hasParam("refresh")) {
            requestBuilder.setRefreshPolicy(request.param("refresh"));
        }
        requestBuilder.setRefreshPolicy(request.param("refresh"));
        requestBuilder.execute(new RestBuilderListener<PutUserResponse>(channel) {
            @Override
            public RestResponse buildResponse(PutUserResponse putUserResponse, XContentBuilder builder) throws Exception {
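Dropping the hasParam("refresh") guard only works if setRefreshPolicy(String) tolerates null. The sketch below assumes it no-ops on null and leaves the builder's default policy in place, which is what the unconditional call implies but which this diff does not itself show:

final class PutUserBuilderSketch {
    enum Policy { NONE, IMMEDIATE, WAIT_UNTIL }

    private Policy refreshPolicy = Policy.NONE; // assumed default

    // Assumed null-tolerant behaviour: an absent ?refresh means "keep the
    // default", so the REST handler no longer needs its own hasParam guard.
    PutUserBuilderSketch setRefreshPolicy(String value) {
        if (value != null) {
            this.refreshPolicy = "wait_for".equals(value) ? Policy.WAIT_UNTIL
                    : "false".equals(value) ? Policy.NONE
                    : Policy.IMMEDIATE;
        }
        return this;
    }

    Policy refreshPolicy() {
        return refreshPolicy;
    }
}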
Some files were not shown because too many files have changed in this diff.