Merge branch 'master' into fix/remove-client-cookie
Original commit: elastic/x-pack-elasticsearch@f701619f79
commit 1a99a400c6
@@ -70,6 +70,8 @@
   - match: { _id: "watch_with_groovy_closure" }

   - do:
+      warnings:
+        - '[groovy] scripts are deprecated, use [painless] scripts instead'
      xpack.watcher.execute_watch:
        id: "watch_with_groovy_closure"
        body: >
@@ -118,6 +120,8 @@
              { "status": "red", "@timestamp": "2005-01-01T00:01:55" }

   - do:
+      warnings:
+        - '[groovy] scripts are deprecated, use [painless] scripts instead'
      xpack.watcher.execute_watch:
        id: "watch_with_groovy_closure"
        body: >
@@ -16,7 +16,7 @@
 # Creates indices with old versions of elasticsearch. These indices are used by x-pack plugins like security
 # to test if the import of metadata that is stored in elasticsearch indexes works correctly.
 # This tool will start a node on port 9200/9300. If a node is already running on that port then the script will fail.
-# Currently this script can only deal with versions >=2.3X and < 5.0. Needs more work for versions before or after.
+# Currently this script can only deal with versions >=2.0.0 and < 5.0. Needs more work for versions before or after.
 #
 # Run from x-plugins root directory like so:
 # python3 ./elasticsearch/x-dev-tools/create_bwc_indexes.py 2.3.4
@@ -50,6 +50,7 @@ try:
     from elasticsearch import Elasticsearch
     from elasticsearch.exceptions import ConnectionError
     from elasticsearch.exceptions import TransportError
+    from elasticsearch.exceptions import NotFoundError
     from elasticsearch.client import IndicesClient
 except ImportError as e:
     print('Can\'t import elasticsearch please install `sudo pip3 install elasticsearch`')
@@ -80,7 +81,10 @@ def start_node(version, release_dir, data_dir):
     return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

 def install_plugin(version, release_dir, plugin_name):
-    run_plugin(version, release_dir, 'install', [plugin_name])
+    args = [plugin_name]
+    if parse_version(version) >= parse_version('2.2.0'):
+        args = [plugin_name, '--batch']
+    run_plugin(version, release_dir, 'install', args)

 def remove_plugin(version, release_dir, plugin_name):
     run_plugin(version, release_dir, 'remove', [plugin_name])
@@ -96,9 +100,8 @@ def create_client():
             client = Elasticsearch([{'host': 'localhost', 'port': 9200, 'http_auth':'es_admin:0123456789'}])
             health = client.cluster.health(wait_for_nodes=1)
             return client
-        except Exception as e:
-            logging.info('got exception while waiting for cluster' + str(e))
-            pass
+        except ConnectionError:
+            logging.info('Not started yet...')
             time.sleep(1)
     assert False, 'Timed out waiting for node for %s seconds' % timeout
@@ -113,11 +116,17 @@ def generate_security_index(client, version):
         "roles" : [ "bwc_test_role" ]
     }

-    response = requests.put('http://localhost:9200/_shield/user/bwc_test_user', auth=('es_admin', '0123456789'), data=json.dumps(body))
-    logging.info('put user response: ' + response.text)
-    if (response.status_code != 200) :
+    while True:
+        response = requests.put('http://localhost:9200/_shield/user/bwc_test_user', auth=('es_admin', '0123456789'), data=json.dumps(body))
+        logging.info('put user response: ' + response.text)
+        if response.status_code == 200:
+            break
+        else:
+            if 'service has not been started' in response.text:
+                continue
         raise Exception('PUT http://localhost:9200/_shield/role/bwc_test_role did not succeed!')

     # add a role
     body = {
         "cluster": ["all"],
@@ -154,6 +163,107 @@
     health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.security')
     assert health['timed_out'] == False, 'cluster health timed out %s' % health

+# this adds a couple of watches and waits for the watch_history to accumulate some results
+def generate_watcher_index(client, version):
+    logging.info('Adding a watch')
+    body = {
+        "trigger" : {
+            "schedule": {
+                "interval": "1s"
+            }
+        },
+        "condition" : {
+            "always" : {}
+        },
+        "throttle_period": "1s",
+        "actions" : {
+            "index_payload" : {
+                "transform" : {
+                    "search" : {
+                        "request" : {
+                            "body" : { "size": 1, "query" : { "match_all" : {} }}
+                        }
+                    }
+                },
+                "index" : {
+                    "index" : "bwc_watch_index",
+                    "doc_type" : "bwc_watch_type"
+                }
+            }
+        }
+    }
+    response = requests.put('http://localhost:9200/_watcher/watch/bwc_watch', auth=('es_admin', '0123456789'), data=json.dumps(body))
+    logging.info('PUT watch response: ' + response.text)
+    if (response.status_code != 201) :
+        raise Exception('PUT http://localhost:9200/_watcher/watch/bwc_watch did not succeed!')
+
+    if parse_version(version) < parse_version('2.3.0'):
+        logging.info('Skipping watch with a funny read timeout because email attachment is not supported by this version')
+    else:
+        logging.info('Adding a watch with a funny read timeout')
+        body = {
+            "trigger" : {
+                "schedule": {
+                    "interval": "100s"
+                }
+            },
+            "condition": {
+                "never": {}
+            },
+            "actions": {
+                "work": {
+                    "email": {
+                        "to": "email@domain.com",
+                        "subject": "Test Kibana PDF report",
+                        "attachments": {
+                            "test_report.pdf": {
+                                "http": {
+                                    "content_type": "application/pdf",
+                                    "request": {
+                                        "read_timeout": "100s",
+                                        "scheme": "https",
+                                        "host": "example.com",
+                                        "path":"{{ctx.metadata.report_url}}",
+                                        "port": 8443,
+                                        "auth": {
+                                            "basic": {
+                                                "username": "Aladdin",
+                                                "password": "open sesame"
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        response = requests.put('http://localhost:9200/_watcher/watch/bwc_funny_timeout', auth=('es_admin', '0123456789'), data=json.dumps(body))
+        logging.info('PUT watch response: ' + response.text)
+        if (response.status_code != 201) :
+            raise Exception('PUT http://localhost:9200/_watcher/watch/bwc_funny_timeout did not succeed!')
+
+    # wait to accumulate some watches
+    logging.info('Waiting for watch results index to fill up...')
+    for attempt in range(1, 31):
+        try:
+            response = client.search(index="bwc_watch_index", body={"query": {"match_all": {}}})
+            logging.info('(' + str(attempt) + ') Got ' + str(response['hits']['total']) + ' hits and want 10...')
+            if response['hits']['total'] >= 10:
+                break
+        except NotFoundError:
+            logging.info('(' + str(attempt) + ') Not found, retrying')
+        time.sleep(1)
+
+    health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.watches')
+    assert health['timed_out'] == False, 'cluster health timed out %s' % health
+    health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='.watch_history*')
+    assert health['timed_out'] == False, 'cluster health timed out %s' % health
+    health = client.cluster.health(wait_for_status='yellow', wait_for_relocating_shards=0, index='bwc_watch_index')
+    assert health['timed_out'] == False, 'cluster health timed out %s' % health
+
+
 def compress_index(version, tmp_dir, output_dir):
     compress(tmp_dir, output_dir, 'x-pack-%s.zip' % version, 'data')
@@ -232,50 +342,52 @@ def main():
     logging.getLogger('urllib3').setLevel(logging.WARN)
     cfg = parse_config()
     for version in cfg.versions:
-        if parse_version(version) < parse_version('2.3.0'):
-            logging.info('version is ' + version + ' but shield supports native realm only from 2.3.0 on. nothing to do.')
-            continue
-        else:
-            logging.info('--> Creating x-pack index for %s' % version)
-
-            # setup for starting nodes
-            release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % version)
-            if not os.path.exists(release_dir):
-                raise RuntimeError('ES version %s does not exist in %s' % (version, cfg.releases_dir))
-            tmp_dir = tempfile.mkdtemp()
-            data_dir = os.path.join(tmp_dir, 'data')
-            logging.info('Temp data dir: %s' % data_dir)
-            node = None
+        logging.info('--> Creating x-pack index for %s' % version)
+
+        # setup for starting nodes
+        release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % version)
+        if not os.path.exists(release_dir):
+            raise RuntimeError('ES version %s does not exist in %s' % (version, cfg.releases_dir))
+        tmp_dir = tempfile.mkdtemp()
+        data_dir = os.path.join(tmp_dir, 'data')
+        logging.info('Temp data dir: %s' % data_dir)
+        node = None

-            try:
+        try:

-                # install plugins
-                remove_plugin(version, release_dir, 'license')
-                remove_plugin(version, release_dir, 'shield')
-                # remove the shield config too before fresh install
-                run('rm -rf %s' %(os.path.join(release_dir, 'config/shield')))
-                install_plugin(version, release_dir, 'license')
-                install_plugin(version, release_dir, 'shield')
-                # here we could also install watcher etc
+            # install plugins
+            remove_plugin(version, release_dir, 'license')
+            remove_plugin(version, release_dir, 'shield')
+            remove_plugin(version, release_dir, 'watcher')
+            # remove the shield config too before fresh install
+            run('rm -rf %s' %(os.path.join(release_dir, 'config/shield')))
+            install_plugin(version, release_dir, 'license')
+            install_plugin(version, release_dir, 'shield')
+            install_plugin(version, release_dir, 'watcher')
+            # here we could also install watcher etc

-                # create admin
-                run('%s useradd es_admin -r admin -p 0123456789' %(os.path.join(release_dir, 'bin/shield/esusers')))
-                node = start_node(version, release_dir, data_dir)
+            # create admin
+            run('%s useradd es_admin -r admin -p 0123456789' %(os.path.join(release_dir, 'bin/shield/esusers')))
+            node = start_node(version, release_dir, data_dir)

-                # create a client that authenticates as es_admin
-                client = create_client()
-                generate_security_index(client, version)
-                # here we could also add watches, monitoring etc
+            # create a client that authenticates as es_admin
+            client = create_client()
+            if parse_version(version) < parse_version('2.3.0'):
+                logging.info('Version is ' + version + ' but shield supports native realm only from 2.3.0 on. Nothing to do for Shield.')
+            else:
+                generate_security_index(client, version)
+            generate_watcher_index(client, version)
+            # here we could also add watches, monitoring etc

-                shutdown_node(node)
-                node = None
-                compress_index(version, tmp_dir, cfg.output_dir)
-            finally:
-
-                if node is not None:
-                    # This only happens if we've hit an exception:
-                    shutdown_node(node)
-                shutil.rmtree(tmp_dir)
+            shutdown_node(node)
+            node = None
+            compress_index(version, tmp_dir, cfg.output_dir)
+        finally:
+
+            if node is not None:
+                # This only happens if we've hit an exception:
+                shutdown_node(node)
+            shutil.rmtree(tmp_dir)

 if __name__ == '__main__':
     try:
@@ -7,13 +7,17 @@ package org.elasticsearch.xpack.monitoring.collector.cluster;

 import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
 import org.elasticsearch.license.License;
+import org.elasticsearch.xpack.XPackFeatureSet;
 import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
+
+import java.util.List;

 public class ClusterInfoMonitoringDoc extends MonitoringDoc {

     private String clusterName;
     private String version;
     private License license;
+    private List<XPackFeatureSet.Usage> usage;
     private ClusterStatsResponse clusterStats;

     public ClusterInfoMonitoringDoc(String monitoringId, String monitoringVersion) {
@@ -44,6 +48,14 @@ public class ClusterInfoMonitoringDoc extends MonitoringDoc {
         this.license = license;
     }

+    public List<XPackFeatureSet.Usage> getUsage() {
+        return usage;
+    }
+
+    public void setUsage(List<XPackFeatureSet.Usage> usage) {
+        this.usage = usage;
+    }
+
     public ClusterStatsResponse getClusterStats() {
         return clusterStats;
     }
@@ -13,10 +13,13 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.license.LicenseService;
 import org.elasticsearch.license.LicenseUtils;
 import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.xpack.XPackFeatureSet;
+import org.elasticsearch.xpack.action.XPackUsageRequestBuilder;
 import org.elasticsearch.xpack.monitoring.MonitoringSettings;
 import org.elasticsearch.xpack.monitoring.collector.AbstractCollector;
 import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
@@ -60,25 +63,17 @@ public class ClusterStatsCollector extends AbstractCollector {

     @Override
     protected Collection<MonitoringDoc> doCollect() throws Exception {
-        List<MonitoringDoc> results = new ArrayList<>(1);
+        final Supplier<ClusterStatsResponse> clusterStatsSupplier =
+                () -> client.admin().cluster().prepareClusterStats().get(monitoringSettings.clusterStatsTimeout());
+        final Supplier<List<XPackFeatureSet.Usage>> usageSupplier = () -> new XPackUsageRequestBuilder(client).get().getUsages();

-        // Retrieves cluster stats
-        ClusterStatsResponse clusterStats = null;
-        try {
-            clusterStats = client.admin().cluster().prepareClusterStats().get(monitoringSettings.clusterStatsTimeout());
-        } catch (ElasticsearchSecurityException e) {
-            if (LicenseUtils.isLicenseExpiredException(e)) {
-                logger.trace(
-                        (Supplier<?>) () -> new ParameterizedMessage(
-                                "collector [{}] - unable to collect data because of expired license", name()), e);
-            } else {
-                throw e;
-            }
-        }
+        final ClusterStatsResponse clusterStats = clusterStatsSupplier.get();

-        long timestamp = System.currentTimeMillis();
-        String clusterUUID = clusterUUID();
-        DiscoveryNode sourceNode = localNode();
+        final long timestamp = System.currentTimeMillis();
+        final String clusterUUID = clusterUUID();
+        final DiscoveryNode sourceNode = localNode();
+
+        final List<MonitoringDoc> results = new ArrayList<>(1);

         // Adds a cluster info document
         ClusterInfoMonitoringDoc clusterInfoDoc = new ClusterInfoMonitoringDoc(monitoringId(), monitoringVersion());
@@ -89,6 +84,7 @@ public class ClusterStatsCollector extends AbstractCollector {
         clusterInfoDoc.setVersion(Version.CURRENT.toString());
         clusterInfoDoc.setLicense(licenseService.getLicense());
         clusterInfoDoc.setClusterStats(clusterStats);
+        clusterInfoDoc.setUsage(collect(usageSupplier));
         results.add(clusterInfoDoc);

         // Adds a cluster stats document
@@ -103,4 +99,21 @@ public class ClusterStatsCollector extends AbstractCollector {

         return Collections.unmodifiableCollection(results);
     }
+
+    @Nullable
+    private <T> T collect(final Supplier<T> supplier) {
+        try {
+            return supplier.get();
+        } catch (ElasticsearchSecurityException e) {
+            if (LicenseUtils.isLicenseExpiredException(e)) {
+                logger.trace((Supplier<?>) () -> new ParameterizedMessage(
+                        "collector [{}] - unable to collect data because of expired license", name()), e);
+            } else {
+                throw e;
+            }
+        }
+
+        return null;
+    }
+
 }
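The collect(Supplier) helper above centralizes the expired-license handling that doCollect previously inlined around the cluster-stats call: a supplier whose failure is a known, tolerable condition yields null instead of aborting the whole collection. A minimal, self-contained sketch of the same pattern follows; every name in it (GracefulCollector, the IllegalStateException stand-in, the message check) is illustrative only and not part of this commit:

import java.util.function.Supplier;

final class GracefulCollector {
    // Runs the supplier and returns its value, or null when the failure is a
    // known, tolerable condition; any other failure still propagates.
    static <T> T collect(Supplier<T> supplier) {
        try {
            return supplier.get();
        } catch (IllegalStateException e) { // stand-in for ElasticsearchSecurityException
            if ("license expired".equals(e.getMessage())) { // stand-in for LicenseUtils.isLicenseExpiredException(e)
                return null; // degrade gracefully: the document is simply built without this piece
            }
            throw e;
        }
    }

    public static void main(String[] args) {
        Supplier<String> usage = () -> { throw new IllegalStateException("license expired"); };
        System.out.println(GracefulCollector.collect(usage)); // prints "null" instead of failing the run
    }
}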
@@ -11,11 +11,13 @@ import org.elasticsearch.common.hash.MessageDigests;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.license.License;
+import org.elasticsearch.xpack.XPackFeatureSet;
 import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterInfoMonitoringDoc;
 import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;

 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.List;
 import java.util.Map;

 public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<ClusterInfoMonitoringDoc> {
@@ -34,27 +36,38 @@ public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<Cluste

     @Override
     protected void buildXContent(ClusterInfoMonitoringDoc document, XContentBuilder builder, ToXContent.Params params) throws IOException {
-        builder.field(Fields.CLUSTER_NAME, document.getClusterName());
-        builder.field(Fields.VERSION, document.getVersion());
+        builder.field("cluster_name", document.getClusterName());
+        builder.field("version", document.getVersion());

-        License license = document.getLicense();
+        final License license = document.getLicense();
         if (license != null) {
-            builder.startObject(Fields.LICENSE);
+            builder.startObject("license");
             Map<String, String> extraParams = new MapBuilder<String, String>()
                     .put(License.REST_VIEW_MODE, "true")
                     .map();
             params = new ToXContent.DelegatingMapParams(extraParams, params);
             license.toInnerXContent(builder, params);
-            builder.field(Fields.HKEY, hash(license, document.getClusterUUID()));
+            builder.field("hkey", hash(license, document.getClusterUUID()));
             builder.endObject();
         }

-        builder.startObject(Fields.CLUSTER_STATS);
-        ClusterStatsResponse clusterStats = document.getClusterStats();
+        final ClusterStatsResponse clusterStats = document.getClusterStats();
         if (clusterStats != null) {
+            builder.startObject("cluster_stats");
             clusterStats.toXContent(builder, params);
+            builder.endObject();
         }
-        builder.endObject();
+
+        final List<XPackFeatureSet.Usage> usages = document.getUsage();
+        if (usages != null) {
+            // in the future we may choose to add other usages under the stack_stats section, but it is only xpack for now
+            // it may also be combined on the UI side of phone-home to add things like "kibana" and "logstash" under "stack_stats"
+            builder.startObject("stack_stats").startObject("xpack");
+            for (final XPackFeatureSet.Usage usage : usages) {
+                builder.field(usage.name(), usage);
+            }
+            builder.endObject().endObject();
+        }
     }

     public static String hash(License license, String clusterName) {
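To make the new output concrete: buildXContent now nests one object per XPackFeatureSet.Usage under stack_stats.xpack, alongside the existing cluster_name, version, license, and cluster_stats fields. A rough sketch of the resulting document shape, built here with Jackson purely for illustration (all values are placeholders; the real document is produced by XContentBuilder):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class StackStatsShapeSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode doc = mapper.createObjectNode();
        doc.put("cluster_name", "my-cluster"); // placeholder value
        doc.put("version", "5.0.0");           // placeholder value
        doc.putObject("license").put("hkey", "<sha256 over license fields + cluster uuid>");
        doc.putObject("cluster_stats");        // omitted entirely when getClusterStats() is null
        ObjectNode xpack = doc.putObject("stack_stats").putObject("xpack");
        xpack.putObject("monitoring").put("enabled", true); // one object per feature-set usage
        System.out.println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(doc));
    }
}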
@@ -66,15 +79,4 @@ public class ClusterInfoResolver extends MonitoringIndexNameResolver.Data<Cluste
         return MessageDigests.toHexString(MessageDigests.sha256().digest(toHash.getBytes(StandardCharsets.UTF_8)));
     }
-
-    static final class Fields {
-        static final String CLUSTER_NAME = "cluster_name";
-        static final String LICENSE = "license";
-        static final String VERSION = "version";
-        static final String CLUSTER_STATS = "cluster_stats";
-
-        static final String HKEY = "hkey";
-
-        static final String UID = "uid";
-        static final String TYPE = "type";
-    }
 }
@@ -98,7 +98,16 @@ public class NodeStatsResolver extends MonitoringIndexNameResolver.Timestamped<N
                 "node_stats.thread_pool.search.rejected",
                 "node_stats.thread_pool.watcher.threads",
                 "node_stats.thread_pool.watcher.queue",
-                "node_stats.thread_pool.watcher.rejected");
+                "node_stats.thread_pool.watcher.rejected",
+                // Linux Only (at least for now)
+                // Disk Info
+                "node_stats.fs.data.spins",
+                // Node IO Stats
+                "node_stats.fs.io_stats.total.operations",
+                "node_stats.fs.io_stats.total.read_operations",
+                "node_stats.fs.io_stats.total.write_operations",
+                "node_stats.fs.io_stats.total.read_kilobytes",
+                "node_stats.fs.io_stats.total.write_kilobytes");
         FILTERS = Collections.unmodifiableSet(filters);
     }
@@ -556,7 +556,38 @@
           }
         },
         "fs": {
-          "type": "object"
+          "properties": {
+            "data": {
+              "properties": {
+                "spins": {
+                  "type": "boolean"
+                }
+              }
+            },
+            "io_stats": {
+              "properties": {
+                "total": {
+                  "properties": {
+                    "operations": {
+                      "type": "long"
+                    },
+                    "read_operations": {
+                      "type": "long"
+                    },
+                    "write_operations": {
+                      "type": "long"
+                    },
+                    "read_kilobytes": {
+                      "type": "long"
+                    },
+                    "write_kilobytes": {
+                      "type": "long"
+                    }
+                  }
+                }
+              }
+            }
+          }
         },
         "os": {
           "type": "object"
@@ -15,6 +15,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.license.License;
+import org.elasticsearch.xpack.monitoring.MonitoringFeatureSet;
 import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterInfoMonitoringDoc;
 import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
 import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolverTestCase;
@@ -49,6 +50,7 @@ public class ClusterInfoResolverTests extends MonitoringIndexNameResolverTestCas
             doc.setClusterName(randomAsciiOfLength(5));
             doc.setClusterStats(new ClusterStatsResponse(Math.abs(randomLong()), ClusterName.CLUSTER_NAME_SETTING
                     .getDefault(Settings.EMPTY), randomAsciiOfLength(5), Collections.emptyList(), Collections.emptyList()));
+            doc.setUsage(Collections.singletonList(new MonitoringFeatureSet.Usage(randomBoolean(), randomBoolean(), emptyMap())));
             return doc;
         } catch (Exception e) {
             throw new IllegalStateException("Failed to generate random ClusterInfoMonitoringDoc", e);
@@ -72,13 +74,14 @@ public class ClusterInfoResolverTests extends MonitoringIndexNameResolverTestCas
         assertThat(resolver.id(doc), equalTo(clusterUUID));

         assertSource(resolver.source(doc, XContentType.JSON),
-                Sets.newHashSet(
-                        "cluster_uuid",
-                        "timestamp",
-                        "source_node",
-                        "cluster_name",
-                        "version",
-                        "license",
-                        "cluster_stats"));
+                Sets.newHashSet(
+                        "cluster_uuid",
+                        "timestamp",
+                        "source_node",
+                        "cluster_name",
+                        "version",
+                        "license",
+                        "cluster_stats",
+                        "stack_stats.xpack"));
     }
 }
@@ -27,6 +27,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.isEmptyOrNullString;
 import static org.hamcrest.Matchers.not;
@@ -61,14 +62,14 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
         final String clusterUUID = client().admin().cluster().prepareState().setMetaData(true).get().getState().metaData().clusterUUID();
         assertTrue(Strings.hasText(clusterUUID));

-        logger.debug("--> waiting for the monitoring data index to be created (it should have been created by the ClusterInfoCollector)");
+        // waiting for the monitoring data index to be created (it should have been created by the ClusterInfoCollector)
         String dataIndex = ".monitoring-data-" + MonitoringTemplateUtils.TEMPLATE_VERSION;
         awaitIndexExists(dataIndex);

-        logger.debug("--> waiting for cluster info collector to collect data");
+        // waiting for cluster info collector to collect data
         awaitMonitoringDocsCount(equalTo(1L), ClusterInfoResolver.TYPE);

-        logger.debug("--> retrieving cluster info document");
+        // retrieving cluster info document
         GetResponse response = client().prepareGet(dataIndex, ClusterInfoResolver.TYPE, clusterUUID).get();
         assertTrue("cluster_info document does not exist in data index", response.isExists());
@@ -80,20 +81,19 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
         assertThat(source.get(MonitoringIndexNameResolver.Fields.CLUSTER_UUID), notNullValue());
         assertThat(source.get(MonitoringIndexNameResolver.Fields.TIMESTAMP), notNullValue());
         assertThat(source.get(MonitoringIndexNameResolver.Fields.SOURCE_NODE), notNullValue());
-        assertThat(source.get(ClusterInfoResolver.Fields.CLUSTER_NAME), equalTo(cluster().getClusterName()));
-        assertThat(source.get(ClusterInfoResolver.Fields.VERSION), equalTo(Version.CURRENT.toString()));
+        assertThat(source.get("cluster_name"), equalTo(cluster().getClusterName()));
+        assertThat(source.get("version"), equalTo(Version.CURRENT.toString()));

         logger.debug("--> checking that the document contains license information");
-        Object licenseObj = source.get(ClusterInfoResolver.Fields.LICENSE);
+        Object licenseObj = source.get("license");
         assertThat(licenseObj, instanceOf(Map.class));
         Map license = (Map) licenseObj;

-        assertThat(license, instanceOf(Map.class));
-
-        String uid = (String) license.get(ClusterInfoResolver.Fields.UID);
+        String uid = (String) license.get("uid");
         assertThat(uid, not(isEmptyOrNullString()));

-        String type = (String) license.get(ClusterInfoResolver.Fields.TYPE);
+        String type = (String) license.get("type");
         assertThat(type, not(isEmptyOrNullString()));

         String status = (String) license.get(License.Fields.STATUS);
@@ -103,7 +103,7 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
         assertThat(expiryDate, greaterThan(0L));

         // We basically recompute the hash here
-        String hkey = (String) license.get(ClusterInfoResolver.Fields.HKEY);
+        String hkey = (String) license.get("hkey");
         String recalculated = ClusterInfoResolver.hash(status, uid, type, String.valueOf(expiryDate), clusterUUID);
         assertThat(hkey, equalTo(recalculated));
@@ -112,14 +112,30 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
         assertThat((Long) license.get(License.Fields.ISSUE_DATE_IN_MILLIS), greaterThan(0L));
         assertThat((Integer) license.get(License.Fields.MAX_NODES), greaterThan(0));

-        Object clusterStats = source.get(ClusterInfoResolver.Fields.CLUSTER_STATS);
+        Object clusterStats = source.get("cluster_stats");
         assertNotNull(clusterStats);
         assertThat(clusterStats, instanceOf(Map.class));
         assertThat(((Map) clusterStats).size(), greaterThan(0));

+        Object stackStats = source.get("stack_stats");
+        assertNotNull(stackStats);
+        assertThat(stackStats, instanceOf(Map.class));
+        assertThat(((Map) stackStats).size(), equalTo(1));
+
+        Object xpack = ((Map)stackStats).get("xpack");
+        assertNotNull(xpack);
+        assertThat(xpack, instanceOf(Map.class));
+        // it must have at least monitoring, but others may be hidden
+        assertThat(((Map) xpack).size(), greaterThanOrEqualTo(1));
+
+        Object monitoring = ((Map)xpack).get("monitoring");
+        assertNotNull(monitoring);
+        // we don't make any assumptions about what's in it, only that it's there
+        assertThat(monitoring, instanceOf(Map.class));
+
         waitForMonitoringTemplates();

-        logger.debug("--> check that the cluster_info is not indexed");
+        // check that the cluster_info is not indexed
         securedFlush();
         securedRefresh();
@@ -131,8 +147,7 @@ public class ClusterInfoTests extends MonitoringIntegTestCase {
                         .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.ACTIVE.label()))
                         .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.INVALID.label()))
                         .should(QueryBuilders.matchQuery(License.Fields.STATUS, License.Status.EXPIRED.label()))
-                        .should(QueryBuilders.matchQuery(ClusterInfoResolver.Fields.CLUSTER_NAME,
-                                cluster().getClusterName()))
+                        .should(QueryBuilders.matchQuery("cluster_name", cluster().getClusterName()))
                         .minimumNumberShouldMatch(1)
                 ).get(), 0L);
     }
@@ -112,11 +112,12 @@ public class ClusterStatsResolverTests extends MonitoringIndexNameResolverTestCa
         BoundTransportAddress transportAddress = new BoundTransportAddress(new TransportAddress[]{LocalTransportAddress.buildUnique()},
                 LocalTransportAddress.buildUnique());
         return new NodeInfo(Version.CURRENT, org.elasticsearch.Build.CURRENT,
-                new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT),
-                Settings.EMPTY, DummyOsInfo.INSTANCE, new ProcessInfo(randomInt(), randomBoolean()), JvmInfo.jvmInfo(),
+                new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), Settings.EMPTY,
+                DummyOsInfo.INSTANCE, new ProcessInfo(randomInt(), randomBoolean(), randomPositiveLong()), JvmInfo.jvmInfo(),
                 new ThreadPoolInfo(Collections.singletonList(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5))),
                 new TransportInfo(transportAddress, Collections.emptyMap()), new HttpInfo(transportAddress, randomLong()),
-                new PluginsAndModules(), new IngestInfo(Collections.emptyList()), new ByteSizeValue(randomIntBetween(1, 1024)));
+                new PluginsAndModules(Collections.emptyList(), Collections.emptyList()),
+                new IngestInfo(Collections.emptyList()), new ByteSizeValue(randomIntBetween(1, 1024)));
     }
@@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -81,6 +82,19 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
         if (Constants.WINDOWS && field.startsWith("node_stats.os.cpu.load_average")) {
             return;
         }

+        // we only report IoStats and spins on Linux
+        if (Constants.LINUX == false) {
+            if (field.startsWith("node_stats.fs.io_stats")) {
+                return;
+            }
+        }
+
+        // node_stats.fs.data.spins can be null and it's only reported on Linux
+        if (field.startsWith("node_stats.fs.data.spins")) {
+            return;
+        }
+
         super.assertSourceField(field, sourceFields);
     }
@@ -140,6 +154,22 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
                 new NodeIndicesStats(new CommonStats(), statsByShard), OsProbe.getInstance().osStats(),
                 ProcessProbe.getInstance().processStats(), JvmStats.jvmStats(),
                 new ThreadPoolStats(threadPoolStats),
-                new FsInfo(0, null, pathInfo), null, null, null, null, null, null);
+                new FsInfo(0, randomIoStats(), pathInfo), null, null, null, null, null, null);
     }

+    @Nullable
+    private FsInfo.IoStats randomIoStats() {
+        if (Constants.LINUX) {
+            final int stats = randomIntBetween(1, 3);
+            final FsInfo.DeviceStats[] devices = new FsInfo.DeviceStats[stats];
+
+            for (int i = 0; i < devices.length; ++i) {
+                devices[i] = new FsInfo.DeviceStats(253, 0, "dm-" + i, 287734, 7185242, 8398869, 118857776, null);
+            }
+
+            return new FsInfo.IoStats(devices);
+        }
+
+        return null;
+    }
 }
@@ -519,9 +519,6 @@ public abstract class MonitoringIntegTestCase extends ESIntegTestCase {
             "\n" +
             "admin:\n" +
             "  cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
-            "transport_client:\n" +
-            "  cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
-            "\n" +
             "monitor:\n" +
             "  cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n"
             ;
@@ -30,10 +30,7 @@ public class AuthenticateRequest extends ActionRequest<AuthenticateRequest> impl

     @Override
     public ActionRequestValidationException validate() {
-        Validation.Error error = Validation.Users.validateUsername(username);
-        if (error != null) {
-            return addValidationError(error.toString(), null);
-        }
+        // we cannot apply our validation rules here as an authenticate request could be for an LDAP user that doesn't fit our restrictions
         return null;
     }
@@ -80,9 +80,6 @@ public class FileUserPasswdStore {
     }

     public boolean verifyPassword(String username, SecuredString password) {
-        if (users == null) {
-            return false;
-        }
         char[] hash = users.get(username);
         return hash != null && hasher.verify(password, hash);
     }
@@ -143,7 +143,7 @@ public class FileUserRolesStore {
                 continue;
             }
             String role = line.substring(0, i).trim();
-            Validation.Error validationError = Validation.Roles.validateRoleName(role);
+            Validation.Error validationError = Validation.Roles.validateRoleName(role, true);
             if (validationError != null) {
                 logger.error("invalid role entry in users_roles file [{}], line [{}] - {}. skipping...", path.toAbsolutePath(), lineNr,
                         validationError);
@@ -446,7 +446,7 @@ public class UsersTool extends MultiCommand {
         }
         String[] roles = rolesStr.split(",");
         for (String role : roles) {
-            Validation.Error validationError = Validation.Roles.validateRoleName(role);
+            Validation.Error validationError = Validation.Roles.validateRoleName(role, true);
             if (validationError != null) {
                 throw new UserException(ExitCodes.DATA_ERROR, "Invalid role [" + role + "]... " + validationError);
             }
@@ -168,7 +168,7 @@ public class RoleDescriptor implements ToXContent {

     public static RoleDescriptor parse(String name, XContentParser parser) throws IOException {
         // validate name
-        Validation.Error validationError = Validation.Roles.validateRoleName(name);
+        Validation.Error validationError = Validation.Roles.validateRoleName(name, true);
         if (validationError != null) {
             ValidationException ve = new ValidationException();
             ve.addValidationError(validationError.toString());
@@ -5,6 +5,9 @@
  */
 package org.elasticsearch.xpack.security.support;

+import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
+import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
+
 import java.util.regex.Pattern;

 /**
@@ -19,12 +22,16 @@ public final class Validation {
     private static final int MIN_PASSWD_LENGTH = 6;

     public static Error validateUsername(String username) {
-        return COMMON_NAME_PATTERN.matcher(username).matches() ?
-                null :
-                new Error("A valid username must be at least 1 character and no longer than 30 characters. " +
-                        "It must begin with a letter (`a-z` or `A-Z`) or an underscore (`_`). Subsequent " +
-                        "characters can be letters, underscores (`_`), digits (`0-9`) or any of the following " +
-                        "symbols `@`, `-`, `.` or `$`");
+        if (COMMON_NAME_PATTERN.matcher(username).matches() == false) {
+            return new Error("A valid username must be at least 1 character and no longer than 30 characters. " +
+                    "It must begin with a letter (`a-z` or `A-Z`) or an underscore (`_`). Subsequent " +
+                    "characters can be letters, underscores (`_`), digits (`0-9`) or any of the following " +
+                    "symbols `@`, `-`, `.` or `$`");
+        }
+        if (ReservedRealm.isReserved(username)) {
+            return new Error("Username [" + username + "] is reserved and may not be used.");
+        }
+        return null;
     }

     public static Error validatePassword(char[] password) {
@@ -38,12 +45,20 @@ public final class Validation {
     public static final class Roles {

         public static Error validateRoleName(String roleName) {
-            return COMMON_NAME_PATTERN.matcher(roleName).matches() ?
-                    null :
-                    new Error("A valid role name must be at least 1 character and no longer than 30 characters. " +
-                            "It must begin with a letter (`a-z` or `A-Z`) or an underscore (`_`). Subsequent " +
-                            "characters can be letters, underscores (`_`), digits (`0-9`) or any of the following " +
-                            "symbols `@`, `-`, `.` or `$`");
+            return validateRoleName(roleName, false);
+        }
+
+        public static Error validateRoleName(String roleName, boolean allowReserved) {
+            if (COMMON_NAME_PATTERN.matcher(roleName).matches() == false) {
+                return new Error("A valid role name must be at least 1 character and no longer than 30 characters. " +
+                        "It must begin with a letter (`a-z` or `A-Z`) or an underscore (`_`). Subsequent " +
+                        "characters can be letters, underscores (`_`), digits (`0-9`) or any of the following " +
+                        "symbols `@`, `-`, `.` or `$`");
+            }
+            if (allowReserved == false && ReservedRolesStore.isReserved(roleName)) {
+                return new Error("Role [" + roleName + "] is reserved and may not be used.");
+            }
+            return null;
         }
     }
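Net effect of the two Validation changes: username and role-name checks now reject reserved names by default, while trusted callers (the users_roles file parser, the users CLI tool, RoleDescriptor.parse) opt out via the new allowReserved flag. A short sketch of the intended call sites; the scaffolding class is illustrative only, it compiles only against the x-pack sources above, and the assertions mirror the tests further down in this commit:

public class ValidationUsageSketch {
    public static void main(String[] args) {
        // Strict path (e.g. role creation through the API): reserved names are rejected.
        Validation.Error apiError = Validation.Roles.validateRoleName("superuser");
        assert apiError != null; // "Role [superuser] is reserved and may not be used."

        // Trusted path (users_roles file, users tool, RoleDescriptor.parse): reserved names pass.
        Validation.Error fileError = Validation.Roles.validateRoleName("superuser", true);
        assert fileError == null;
    }
}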
@@ -0,0 +1,159 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.SecurityIntegTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collections;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.test.OldIndexUtils.copyIndex;
+import static org.elasticsearch.test.OldIndexUtils.loadDataFilesList;
+
+/**
+ * Base class for tests against clusters coming from old versions of xpack and Elasticsearch.
+ */
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) // We'll start the nodes manually
+public abstract class AbstractOldXPackIndicesBackwardsCompatibilityTestCase extends SecurityIntegTestCase {
+    protected List<String> dataFiles;
+
+    @Override
+    protected final boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Before
+    public final void initIndexesList() throws Exception {
+        dataFiles = loadDataFilesList("x-pack", getBwcIndicesPath());
+    }
+
+    @Override
+    public Settings nodeSettings(int ord) {
+        // speed up recoveries
+        return Settings.builder()
+                .put(super.nodeSettings(ord))
+                .put(ThrottlingAllocationDecider
+                        .CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 30)
+                .put(ThrottlingAllocationDecider
+                        .CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 30)
+                .build();
+    }
+
+    @Override
+    protected int maxNumberOfNodes() {
+        try {
+            return SecurityIntegTestCase.defaultMaxNumberOfNodes() + loadDataFilesList("x-pack", getBwcIndicesPath()).size();
+        } catch (IOException e) {
+            throw new RuntimeException("couldn't enumerate bwc indices", e);
+        }
+    }
+
+    public void testAllVersionsTested() throws Exception {
+        SortedSet<String> expectedVersions = new TreeSet<>();
+        for (Version v : VersionUtils.allVersions()) {
+            if (false == shouldTestVersion(v)) continue;
+            if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself
+            if (v.isBeta() == true || v.isAlpha() == true || v.isRC() == true) continue; // don't check alphas etc
+            expectedVersions.add("x-pack-" + v.toString() + ".zip");
+        }
+        expectedVersions.removeAll(dataFiles);
+        if (expectedVersions.isEmpty() == false) {
+            StringBuilder msg = new StringBuilder("Old index tests are missing indexes:");
+            for (String expected : expectedVersions) {
+                msg.append("\n" + expected);
+            }
+            fail(msg.toString());
+        }
+    }
+
+    public void testOldIndexes() throws Exception {
+        Collections.shuffle(dataFiles, random());
+        for (String dataFile : dataFiles) {
+            Version version = Version.fromString(dataFile.replace("x-pack-", "").replace(".zip", ""));
+            if (false == shouldTestVersion(version)) continue;
+            setupCluster(dataFile);
+            ensureYellow();
+            long startTime = System.nanoTime();
+            try {
+                checkVersion(version);
+            } catch (Throwable t) {
+                throw new AssertionError("Failed while checking [" + version + "]", t);
+            }
+            logger.info("--> Done testing {}, took {} millis", version, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime));
+        }
+    }
+
+    /**
+     * Should we test this version at all? Called before loading the data directory. Return false to skip it entirely.
+     */
+    protected boolean shouldTestVersion(Version version) {
+        return true;
+    }
+
+    /**
+     * Actually test this version.
+     */
+    protected abstract void checkVersion(Version version) throws Exception;
+
+    private void setupCluster(String pathToZipFile) throws Exception {
+        // shutdown any nodes from previous zip files
+        while (internalCluster().size() > 0) {
+            internalCluster().stopRandomNode(s -> true);
+        }
+        // first create the data directory and unzip the data there
+        // we put the whole cluster state and indexes because if we only copy indexes and import them as dangling then
+        // the native realm services will start because there is no security index and nothing is recovering
+        // but we want them to not start!
+        Path dataPath = createTempDir();
+        Settings.Builder nodeSettings = Settings.builder()
+                .put("path.data", dataPath.toAbsolutePath());
+        // unzip data
+        Path backwardsIndex = getBwcIndicesPath().resolve(pathToZipFile);
+        // decompress the index
+        try (InputStream stream = Files.newInputStream(backwardsIndex)) {
+            logger.info("unzipping {}", backwardsIndex.toString());
+            TestUtil.unzip(stream, dataPath);
+            // now we need to copy the whole thing so that it looks like an actual data path
+            try (Stream<Path> unzippedFiles = Files.list(dataPath.resolve("data"))) {
+                Path dataDir = unzippedFiles.findFirst().get();
+                // this is not actually an index but the copy does the job anyway
+                copyIndex(logger, dataDir.resolve("nodes"), "nodes", dataPath);
+                // remove the original unzipped directory
+            }
+            IOUtils.rm(dataPath.resolve("data"));
+        }
+
+        // check it is unique
+        assertTrue(Files.exists(dataPath));
+        Path[] list = FileSystemUtils.files(dataPath);
+        if (list.length != 1) {
+            throw new IllegalStateException("Backwards index must contain exactly one node");
+        }
+
+        // start the node
+        logger.info("--> Data path for importing node: {}", dataPath);
+        String importingNodeName = internalCluster().startNode(nodeSettings.build());
+        Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, importingNodeName).nodeDataPaths();
+        assertEquals(1, nodePaths.length);
+    }
+}
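The new base class is a template method: data-file discovery, cluster setup, and version iteration live here, while subclasses override shouldTestVersion and checkVersion (OldSecurityIndexBackwardsCompatibilityIT below does exactly that). A stripped-down sketch of the contract, with all names illustrative rather than taken from this commit:

import java.util.List;

abstract class VersionedBwcHarness {
    // Subclasses may skip versions entirely, e.g. when a feature only shipped later.
    protected boolean shouldTestVersion(String version) {
        return true;
    }

    // Subclasses assert whatever "still works" means for their feature.
    protected abstract void checkVersion(String version) throws Exception;

    final void runAll(List<String> versions) throws Exception {
        for (String version : versions) {
            if (shouldTestVersion(version) == false) {
                continue; // skipped versions are never even loaded
            }
            checkVersion(version);
        }
    }
}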
@@ -5,19 +5,9 @@
  */
 package org.elasticsearch;

-import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.TestUtil;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalTestCluster;
-import org.elasticsearch.test.SecurityIntegTestCase;
-import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.xpack.security.action.role.GetRolesResponse;
 import org.elasticsearch.xpack.security.action.role.PutRoleResponse;
 import org.elasticsearch.xpack.security.action.user.GetUsersResponse;
@@ -28,26 +18,13 @@ import org.elasticsearch.xpack.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
 import org.elasticsearch.xpack.security.client.SecurityClient;
 import org.elasticsearch.xpack.security.user.User;
-import org.junit.AfterClass;
-import org.junit.Before;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Collections;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Stream;

 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.test.OldIndexUtils.copyIndex;
-import static org.elasticsearch.test.OldIndexUtils.loadIndexesList;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordTokenTests.basicAuthHeaderValue;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.equalTo;

 /**
  * Backwards compatibility test that loads some data from a pre-5.0 cluster and attempts to do some basic security stuff with it. It
@@ -73,126 +50,13 @@ import static org.hamcrest.Matchers.equalTo;
  * <li>This document in {@code index3}: {@code {"title": "bwc_test_user should not see this index"}}</li>
  * </ul>
  **/
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) // We'll start the nodes manually
-public class OldSecurityIndexBackwardsCompatibilityIT extends SecurityIntegTestCase {
-
-    List<String> indexes;
-    static String importingNodeName;
-    static Path dataPath;
-
+public class OldSecurityIndexBackwardsCompatibilityIT extends AbstractOldXPackIndicesBackwardsCompatibilityTestCase {
     @Override
-    protected boolean ignoreExternalCluster() {
-        return true;
+    protected boolean shouldTestVersion(Version version) {
+        return version.onOrAfter(Version.V_2_3_0); // native realm only supported from 2.3.0 on
     }

-    @Before
-    public void initIndexesList() throws Exception {
-        indexes = loadIndexesList("x-pack", getBwcIndicesPath());
-    }
-
-    @AfterClass
-    public static void tearDownStatics() {
-        importingNodeName = null;
-        dataPath = null;
-    }
-
-    @Override
-    public Settings nodeSettings(int ord) {
-        Settings settings = super.nodeSettings(ord);
-        // speed up recoveries
-        return Settings.builder()
-                .put(ThrottlingAllocationDecider
-                        .CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 30)
-                .put(ThrottlingAllocationDecider
-                        .CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 30)
-                .put(settings).build();
-    }
-
-    @Override
-    protected int maxNumberOfNodes() {
-        try {
-            return SecurityIntegTestCase.defaultMaxNumberOfNodes() + loadIndexesList("x-pack", getBwcIndicesPath()).size();
-        } catch (IOException e) {
-            throw new RuntimeException("couldn't enumerate bwc indices", e);
-        }
-    }
-
-    void setupCluster(String pathToZipFile) throws Exception {
-        // shutdown any nodes from previous zip files
-        while (internalCluster().size() > 0) {
-            internalCluster().stopRandomNode(s -> true);
-        }
-        // first create the data directory and unzip the data there
-        // we put the whole cluster state and indexes because if we only copy indexes and import them as dangling then
-        // the native realm services will start because there is no security index and nothing is recovering
-        // but we want them to not start!
-        dataPath = createTempDir();
-        Settings.Builder nodeSettings = Settings.builder()
-                .put("path.data", dataPath.toAbsolutePath());
-        // unzip data
-        Path backwardsIndex = getBwcIndicesPath().resolve(pathToZipFile);
-        // decompress the index
-        try (InputStream stream = Files.newInputStream(backwardsIndex)) {
-            logger.info("unzipping {}", backwardsIndex.toString());
-            TestUtil.unzip(stream, dataPath);
-            // now we need to copy the whole thing so that it looks like an actual data path
-            try (Stream<Path> unzippedFiles = Files.list(dataPath.resolve("data"))) {
-                Path dataDir = unzippedFiles.findFirst().get();
-                // this is not actually an index but the copy does the job anyway
-                copyIndex(logger, dataDir.resolve("nodes"), "nodes", dataPath);
-                // remove the original unzipped directory
-            }
-            IOUtils.rm(dataPath.resolve("data"));
-        }
-
-        // check it is unique
-        assertTrue(Files.exists(dataPath));
-        Path[] list = FileSystemUtils.files(dataPath);
-        if (list.length != 1) {
-            throw new IllegalStateException("Backwards index must contain exactly one node");
-        }
-
-        // start the node
-        logger.info("--> Data path for importing node: {}", dataPath);
-        importingNodeName = internalCluster().startNode(nodeSettings.build());
-        Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, importingNodeName).nodeDataPaths();
-        assertEquals(1, nodePaths.length);
-    }
-
-    public void testAllVersionsTested() throws Exception {
-        SortedSet<String> expectedVersions = new TreeSet<>();
-        for (Version v : VersionUtils.allVersions()) {
-            if (v.before(Version.V_2_3_0)) continue; // native realm only supported from 2.3.0 on
-            if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself
-            if (v.isBeta() == true || v.isAlpha() == true || v.isRC() == true) continue; // don't check alphas etc
-            expectedVersions.add("x-pack-" + v.toString() + ".zip");
-        }
-        for (String index : indexes) {
-            if (expectedVersions.remove(index) == false) {
-                logger.warn("Old indexes tests contain extra index: {}", index);
-            }
-        }
-        if (expectedVersions.isEmpty() == false) {
-            StringBuilder msg = new StringBuilder("Old index tests are missing indexes:");
-            for (String expected : expectedVersions) {
-                msg.append("\n" + expected);
-            }
-            fail(msg.toString());
-        }
-    }
-
-    public void testOldIndexes() throws Exception {
-        Collections.shuffle(indexes, random());
-        for (String index : indexes) {
-            setupCluster(index);
-            ensureYellow();
-            long startTime = System.nanoTime();
-            assertBasicSecurityWorks();
-            logger.info("--> Done testing {}, took {} millis", index, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime));
-        }
-    }
-
-    void assertBasicSecurityWorks() throws Exception {
+    @Override
+    protected void checkVersion(Version version) throws Exception {
         // test that user and roles are there
         logger.info("Getting roles...");
         SecurityClient securityClient = new SecurityClient(client());
@@ -225,7 +89,7 @@ public class OldSecurityIndexBackwardsCompatibilityIT extends SecurityIntegTestC
         assertEquals("bwc_test_user", user.principal());

         // check that documents are there
-        assertThat(client().prepareSearch().get().getHits().getTotalHits(), equalTo(5L));
+        assertHitCount(client().prepareSearch("index1", "index2", "index3").get(), 5);

         Client bwcTestUserClient = client().filterWithHeader(
                 singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("bwc_test_user", "9876543210")));
@@ -43,11 +43,6 @@ public class PermissionPrecedenceTests extends SecurityIntegTestCase {
             "    - names: '*'\n" +
             "      privileges: [ all ]" +
             "\n" +
-            "transport_client:\n" +
-            "  cluster:\n" +
-            "    - cluster:monitor/nodes/info\n" +
-            "    - cluster:monitor/state\n" +
-            "\n" +
             "user:\n" +
             "  indices:\n" +
             "    - names: 'test_*'\n" +
@@ -57,7 +57,7 @@ public class SecuritySettingsSource extends ClusterDiscoveryConfiguration.Unicas
     public static final String DEFAULT_PASSWORD_HASHED = new String(Hasher.BCRYPT.hash(new SecuredString(DEFAULT_PASSWORD.toCharArray())));
     public static final String DEFAULT_ROLE = "user";

-    public static final String DEFAULT_TRANSPORT_CLIENT_ROLE = "trans_client_user";
+    public static final String DEFAULT_TRANSPORT_CLIENT_ROLE = "transport_client";
     public static final String DEFAULT_TRANSPORT_CLIENT_USER_NAME = "test_trans_client_user";

     public static final String CONFIG_STANDARD_USER =
@@ -73,10 +73,7 @@ public class SecuritySettingsSource extends ClusterDiscoveryConfiguration.Unicas
             "  cluster: [ ALL ]\n" +
             "  indices:\n" +
             "    - names: '*'\n" +
-            "      privileges: [ ALL ]\n" +
-            DEFAULT_TRANSPORT_CLIENT_ROLE + ":\n" +
-            "  cluster:\n" +
-            "    - transport_client";
+            "      privileges: [ ALL ]\n";

     private final Path parentFolder;
     private final String subfolderPrefix;
@@ -39,8 +39,6 @@ public class RunAsIntegTests extends SecurityIntegTestCase {
     static final String RUN_AS_USER = "run_as_user";
     static final String TRANSPORT_CLIENT_USER = "transport_user";
     static final String ROLES =
-            "transport_client:\n" +
-            "  cluster: [ 'cluster:monitor/nodes/liveness' ]\n" +
             "run_as_role:\n" +
             "  run_as: [ '" + SecuritySettingsSource.DEFAULT_USER_NAME + "', 'idontexist' ]\n";
@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.authz.store;

 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.core.LogEvent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.xpack.XPackPlugin;
@ -361,14 +360,14 @@ public class FileRolesStoreTests extends ESTestCase {

        assertThat(roles, hasKey("admin"));

        List<String> events = CapturingLogger.output(logger.getName(), Level.WARN);
        List<String> events = CapturingLogger.output(logger.getName(), Level.ERROR);
        assertThat(events, notNullValue());
        assertThat(events, hasSize(4));
        // the system role will always be checked first
        assertThat(events.get(0), containsString("role [_system] is reserved"));
        assertThat(events.get(1), containsString("role [superuser] is reserved"));
        assertThat(events.get(2), containsString("role [kibana] is reserved"));
        assertThat(events.get(3), containsString("role [transport_client] is reserved"));
        assertThat(events.get(0), containsString("Role [_system] is reserved"));
        assertThat(events.get(1), containsString("Role [superuser] is reserved"));
        assertThat(events.get(2), containsString("Role [kibana] is reserved"));
        assertThat(events.get(3), containsString("Role [transport_client] is reserved"));
    }

    public void testUsageStats() throws Exception {

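For context, the assertions above rely on capturing log output in memory and asserting on the recorded messages. A minimal standalone sketch of that pattern (plain Java; `CapturingLogger` itself is an x-pack test utility, and this stand-in only mimics the idea):

import java.util.ArrayList;
import java.util.List;

class CapturingLoggerSketch {
    final List<String> errorEvents = new ArrayList<>();

    // Record the message instead of writing it to a real appender,
    // so a test can later assert on its content.
    void error(String message) {
        errorEvents.add(message);
    }

    public static void main(String[] args) {
        CapturingLoggerSketch logger = new CapturingLoggerSketch();
        logger.error("Role [_system] is reserved");
        logger.error("Role [superuser] is reserved");
        // Mirrors the containsString(...) assertions in the test above.
        System.out.println(logger.errorEvents.get(0).contains("Role [_system] is reserved")); // true
    }
}
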
@ -5,11 +5,16 @@
 */
package org.elasticsearch.xpack.security.support;

import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.security.support.Validation.Error;
import org.elasticsearch.xpack.security.support.Validation.Users;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.KibanaUser;

import java.util.Arrays;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@ -53,6 +58,13 @@ public class ValidationTests extends ESTestCase {
        assertThat(Users.validateUsername(name), nullValue());
    }

    public void testReservedUsernames() {
        final String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME);
        final Error error = Users.validateUsername(username);
        assertNotNull(error);
        assertThat(error.toString(), containsString("is reserved"));
    }

    public void testUsersValidateUsernameInvalidLength() throws Exception {
        int length = frequently() ? randomIntBetween(31, 200) : 0; // invalid length
        char[] name = new char[length];
@ -84,6 +96,16 @@ public class ValidationTests extends ESTestCase {
        assertThat(Validation.Roles.validateRoleName(name), nullValue());
    }

    public void testReservedRoleName() {
        final String rolename = randomFrom(ReservedRolesStore.names());
        final Error error = Validation.Roles.validateRoleName(rolename);
        assertNotNull(error);
        assertThat(error.toString(), containsString("is reserved"));

        final Error allowed = Validation.Roles.validateRoleName(rolename, true);
        assertNull(allowed);
    }

    public void testRolesValidateRoleNameInvalidLength() throws Exception {
        int length = frequently() ? randomIntBetween(31, 200) : 0; // invalid length
        char[] name = new char[length];

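As an illustration of the behavior these two new tests pin down, here is a minimal sketch (plain Java; the class and method names are hypothetical stand-ins, the real logic lives in Validation and ReservedRolesStore): reserved names fail validation unless the caller explicitly allows them via the boolean overload.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class ReservedNamesSketch {
    // Hypothetical stand-in for ReservedRolesStore.names()
    static final Set<String> RESERVED =
            new HashSet<>(Arrays.asList("_system", "superuser", "kibana", "transport_client"));

    // Returns an error message, or null when the name is acceptable,
    // mirroring how Validation returns an Error or null.
    static String validateRoleName(String name, boolean allowReserved) {
        if (allowReserved == false && RESERVED.contains(name)) {
            return "Role [" + name + "] is reserved";
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(validateRoleName("superuser", false)); // Role [superuser] is reserved
        System.out.println(validateRoleName("superuser", true));  // null
    }
}
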
@ -10,7 +10,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -18,10 +17,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.xpack.common.http.auth.HttpAuth;
import org.elasticsearch.xpack.common.http.auth.HttpAuthRegistry;
import org.elasticsearch.xpack.watcher.support.WatcherDateTimeUtils;
import org.elasticsearch.xpack.watcher.support.WatcherUtils;
import org.elasticsearch.xpack.common.http.auth.HttpAuth;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
@ -159,10 +158,12 @@ public class HttpRequest implements ToXContent {
            builder.field(Field.BODY.getPreferredName(), body);
        }
        if (connectionTimeout != null) {
            builder.field(Field.CONNECTION_TIMEOUT.getPreferredName(), connectionTimeout);
            builder.timeValueField(HttpRequest.Field.CONNECTION_TIMEOUT.getPreferredName(),
                    HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.getPreferredName(), connectionTimeout);
        }
        if (readTimeout != null) {
            builder.field(Field.READ_TIMEOUT.getPreferredName(), readTimeout);
            builder.timeValueField(HttpRequest.Field.READ_TIMEOUT.getPreferredName(),
                    HttpRequest.Field.READ_TIMEOUT_HUMAN.getPreferredName(), readTimeout);
        }
        if (proxy != null) {
            builder.field(Field.PROXY.getPreferredName(), proxy);
@ -269,19 +270,26 @@ public class HttpRequest implements ToXContent {
                    }
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.AUTH)) {
                    builder.auth(httpAuthRegistry.parse(parser));
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.CONNECTION_TIMEOUT)) {
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT)) {
                    builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue()));
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN)) {
                    // Users and 2.x specify the timeout this way
                    try {
                        builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser, Field.CONNECTION_TIMEOUT.toString()));
                        builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser,
                                HttpRequest.Field.CONNECTION_TIMEOUT.toString()));
                    } catch (ElasticsearchParseException pe) {
                        throw new ElasticsearchParseException("could not parse http request. invalid time value for [{}] field", pe,
                                currentFieldName);
                        throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field",
                                pe, currentFieldName);
                    }
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.READ_TIMEOUT)) {
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT)) {
                    builder.readTimeout(TimeValue.timeValueMillis(parser.longValue()));
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT_HUMAN)) {
                    // Users and 2.x specify the timeout this way
                    try {
                        builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, Field.READ_TIMEOUT.toString()));
                        builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString()));
                    } catch (ElasticsearchParseException pe) {
                        throw new ElasticsearchParseException("could not parse http request. invalid time value for [{}] field", pe,
                                currentFieldName);
                        throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field",
                                pe, currentFieldName);
                    }
                } else if (token == XContentParser.Token.START_OBJECT) {
                    if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.HEADERS)) {
@ -482,8 +490,10 @@ public class HttpRequest implements ToXContent {
        ParseField HEADERS = new ParseField("headers");
        ParseField AUTH = new ParseField("auth");
        ParseField BODY = new ParseField("body");
        ParseField CONNECTION_TIMEOUT = new ParseField("connection_timeout");
        ParseField READ_TIMEOUT = new ParseField("read_timeout");
        ParseField CONNECTION_TIMEOUT = new ParseField("connection_timeout_in_millis");
        ParseField CONNECTION_TIMEOUT_HUMAN = new ParseField("connection_timeout");
        ParseField READ_TIMEOUT = new ParseField("read_timeout_millis");
        ParseField READ_TIMEOUT_HUMAN = new ParseField("read_timeout");
        ParseField PROXY = new ParseField("proxy");
        ParseField URL = new ParseField("url");
    }

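The net effect of this rename is that the canonical connection_timeout_in_millis/read_timeout_millis fields carry raw milliseconds, while the old connection_timeout/read_timeout spellings remain parseable as 2.x-style human-readable strings. A hedged sketch of that dispatch (plain Java, hypothetical helper; the real code goes through ParseFieldMatcher and WatcherDateTimeUtils):

class TimeoutFieldSketch {
    // Dispatch on the field name the way the parser above does:
    // millis fields are numeric, human fields are strings like "30s".
    static long parseTimeoutMillis(String fieldName, String value) {
        if (fieldName.endsWith("_in_millis") || fieldName.endsWith("_millis")) {
            return Long.parseLong(value);
        }
        // Simplified human form: seconds only, for illustration.
        if (value.endsWith("s")) {
            return (long) (Double.parseDouble(value.substring(0, value.length() - 1)) * 1000);
        }
        throw new IllegalArgumentException("unrecognized timeout value: " + value);
    }

    public static void main(String[] args) {
        System.out.println(parseTimeoutMillis("connection_timeout_in_millis", "5000")); // 5000
        System.out.println(parseTimeoutMillis("connection_timeout", "4.5s"));           // 4500
    }
}
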
@ -17,9 +17,9 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.xpack.common.http.auth.HttpAuth;
import org.elasticsearch.xpack.common.http.auth.HttpAuthRegistry;
import org.elasticsearch.xpack.common.text.TextTemplate;
import org.elasticsearch.xpack.common.text.TextTemplateEngine;
import org.elasticsearch.xpack.watcher.support.WatcherDateTimeUtils;
import org.elasticsearch.xpack.common.text.TextTemplate;
import org.jboss.netty.handler.codec.http.HttpHeaders;

import java.io.IOException;
@ -32,8 +32,6 @@ import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap;

/**
 */
public class HttpRequestTemplate implements ToXContent {

    private final Scheme scheme;
@ -193,10 +191,12 @@ public class HttpRequestTemplate implements ToXContent {
            builder.field(HttpRequest.Field.BODY.getPreferredName(), body, params);
        }
        if (connectionTimeout != null) {
            builder.field(HttpRequest.Field.CONNECTION_TIMEOUT.getPreferredName(), connectionTimeout);
            builder.timeValueField(HttpRequest.Field.CONNECTION_TIMEOUT.getPreferredName(),
                    HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.getPreferredName(), connectionTimeout);
        }
        if (readTimeout != null) {
            builder.field(HttpRequest.Field.READ_TIMEOUT.getPreferredName(), readTimeout);
            builder.timeValueField(HttpRequest.Field.READ_TIMEOUT.getPreferredName(),
                    HttpRequest.Field.READ_TIMEOUT_HUMAN.getPreferredName(), readTimeout);
        }
        if (proxy != null) {
            proxy.toXContent(builder, params);
@ -242,6 +242,11 @@ public class HttpRequestTemplate implements ToXContent {
        return result;
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

    public static Builder builder(String host, int port) {
        return new Builder(host, port);
    }
@ -280,6 +285,9 @@ public class HttpRequestTemplate implements ToXContent {
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.URL)) {
                    builder.fromUrl(parser.text());
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT)) {
                    builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue()));
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN)) {
                    // Users and 2.x specify the timeout this way
                    try {
                        builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser,
                                HttpRequest.Field.CONNECTION_TIMEOUT.toString()));
@ -288,6 +296,9 @@ public class HttpRequestTemplate implements ToXContent {
                                pe, currentFieldName);
                    }
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT)) {
                    builder.readTimeout(TimeValue.timeValueMillis(parser.longValue()));
                } else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT_HUMAN)) {
                    // Users and 2.x specify the timeout this way
                    try {
                        builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString()));
                    } catch (ElasticsearchParseException pe) {

@ -31,9 +31,6 @@ import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;

/**
 *
 */
public class HttpRequestTemplateTests extends ESTestCase {

    public void testBodyWithXContent() throws Exception {

@ -122,11 +119,11 @@ public class HttpRequestTemplateTests extends ESTestCase {
        if (randomBoolean()) {
            builder.putHeader("_key", TextTemplate.inline("_value"));
        }
        long connectionTimeout = randomBoolean() ? 0 : randomIntBetween(5, 10);
        long connectionTimeout = randomBoolean() ? 0 : randomIntBetween(5, 100000);
        if (connectionTimeout > 0) {
            builder.connectionTimeout(TimeValue.timeValueSeconds(connectionTimeout));
        }
        long readTimeout = randomBoolean() ? 0 : randomIntBetween(5, 10);
        long readTimeout = randomBoolean() ? 0 : randomIntBetween(5, 100000);
        if (readTimeout > 0) {
            builder.readTimeout(TimeValue.timeValueSeconds(readTimeout));
        }

@ -146,7 +143,7 @@ public class HttpRequestTemplateTests extends ESTestCase {
        xContentParser.nextToken();
        HttpRequestTemplate parsed = parser.parse(xContentParser);

        assertThat(parsed, equalTo(template));
        assertEquals(template, parsed);
    }

    public void testParsingFromUrl() throws Exception {

16 binary files not shown.

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.watcher.support;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.joda.DateMathParser;

@ -19,7 +20,10 @@ import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/**
 *
 */
@ -140,7 +144,7 @@ public class WatcherDateTimeUtils {
        }
        if (token == XContentParser.Token.VALUE_STRING) {
            try {
                TimeValue value = TimeValue.parseTimeValue(parser.text(), null, settingName);
                TimeValue value = parseTimeValueSupportingFractional(parser.text(), settingName);
                if (value.millis() < 0) {
                    throw new ElasticsearchParseException("could not parse time value [{}]. Time value cannot be negative.", parser.text());
                }
@ -154,6 +158,47 @@ public class WatcherDateTimeUtils {
                "instead", token);
    }

    /**
     * Parse a {@link TimeValue} with support for fractional values.
     */
    public static TimeValue parseTimeValueSupportingFractional(@Nullable String sValue, String settingName) {
        // This code is lifted almost straight from 2.x's TimeValue.java
        Objects.requireNonNull(settingName);
        if (sValue == null) {
            return null;
        }
        try {
            long millis;
            String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim();
            if (lowerSValue.endsWith("ms")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 2)));
            } else if (lowerSValue.endsWith("s")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 1000);
            } else if (lowerSValue.endsWith("m")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 1000);
            } else if (lowerSValue.endsWith("h")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 60 * 1000);
            } else if (lowerSValue.endsWith("d")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 24 * 60 * 60 * 1000);
            } else if (lowerSValue.endsWith("w")) {
                millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 7 * 24 * 60 * 60 * 1000);
            } else if (lowerSValue.equals("-1")) {
                // Allow this special value to be unit-less:
                millis = -1;
            } else if (lowerSValue.equals("0")) {
                // Allow this special value to be unit-less:
                millis = 0;
            } else {
                throw new ElasticsearchParseException(
                        "Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized",
                        settingName, sValue);
            }
            return new TimeValue(millis, TimeUnit.MILLISECONDS);
        } catch (NumberFormatException e) {
            throw new ElasticsearchParseException("Failed to parse [{}]", e, sValue);
        }
    }
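One detail worth calling out in the method above: each branch multiplies a double and then truncates through a (long) cast, so fractional amounts always round toward zero. A minimal standalone check of that arithmetic (plain Java, no Elasticsearch classes required):

public class FractionalTruncation {
    public static void main(String[] args) {
        // "1.2345s" -> 1234.5 ms -> the (long) cast truncates to 1234 ms
        long millis = (long) (Double.parseDouble("1.2345") * 1000);
        System.out.println(millis); // 1234
    }
}
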
    private static class ClockNowCallable implements Callable<Long> {

        private final Clock clock;

@ -5,6 +5,7 @@
 */
package org.elasticsearch.xpack.watcher.support;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -180,10 +181,19 @@ public class WatcherIndexTemplateRegistry extends AbstractComponent implements C
                    .build();
            request.settings(updatedSettings);
        }
        PutIndexTemplateResponse response = client.putTemplate(request);
        if (response.isAcknowledged() == false) {
            logger.error("Error adding watcher template [{}], request was not acknowledged", config.getTemplateName());
        }
        client.putTemplate(request, new ActionListener<PutIndexTemplateResponse>() {
            @Override
            public void onResponse(PutIndexTemplateResponse response) {
                if (response.isAcknowledged() == false) {
                    logger.error("Error adding watcher template [{}], request was not acknowledged", config.getTemplateName());
                }
            }

            @Override
            public void onFailure(Exception e) {
                logger.error("Error adding watcher template [{}]", e, config.getTemplateName());
            }
        });
    });
}

@ -24,11 +24,10 @@ import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.common.init.proxy.ClientProxy;
import org.elasticsearch.xpack.security.InternalClient;

/**
 * A lazily initialized proxy to an elasticsearch {@link Client}. Inject this proxy whenever a client

@ -107,8 +106,8 @@ public class WatcherClientProxy extends ClientProxy {
        return client.admin().indices().refresh(preProcess(request)).actionGet(defaultSearchTimeout);
    }

    public PutIndexTemplateResponse putTemplate(PutIndexTemplateRequest request) {
    public void putTemplate(PutIndexTemplateRequest request, ActionListener<PutIndexTemplateResponse> listener) {
        preProcess(request);
        return client.admin().indices().putTemplate(request).actionGet(defaultIndexTimeout);
        client.admin().indices().putTemplate(request, listener);
    }
}

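Both hunks above make the same change: the blocking actionGet(...) call is replaced by a callback, so the acknowledgement check moves into onResponse and failures surface through onFailure. A hedged standalone sketch of the shape of that conversion (a generic callback interface standing in for Elasticsearch's ActionListener; names are illustrative):

// Minimal stand-in for org.elasticsearch.action.ActionListener
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

class AsyncPutTemplateSketch {
    // before: boolean acknowledged = blockingPutTemplate(name);
    // after:  the result is delivered to the listener instead of returned
    static void putTemplate(String name, Listener<Boolean> listener) {
        try {
            boolean acknowledged = true; // stand-in for the real cluster response
            listener.onResponse(acknowledged);
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }

    public static void main(String[] args) {
        putTemplate("watch_history", new Listener<Boolean>() {
            @Override
            public void onResponse(Boolean acknowledged) {
                if (acknowledged == false) {
                    System.err.println("request was not acknowledged");
                }
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });
    }
}
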
@ -0,0 +1,164 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.watcher;

import org.elasticsearch.AbstractOldXPackIndicesBackwardsCompatibilityTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.common.text.TextTemplate;
import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction;
import org.elasticsearch.xpack.watcher.client.WatchSourceBuilder;
import org.elasticsearch.xpack.watcher.client.WatcherClient;
import org.elasticsearch.xpack.watcher.condition.always.AlwaysCondition;
import org.elasticsearch.xpack.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval;
import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger;

import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;

/**
 * Tests for watcher indexes created before 5.0.
 */
@TestLogging("_root:INFO")
public class OldWatcherIndicesBackwardsCompatibilityIT extends AbstractOldXPackIndicesBackwardsCompatibilityTestCase {
    @Override
    public Settings nodeSettings(int ord) {
        return Settings.builder()
                .put(super.nodeSettings(ord))
                .put(XPackSettings.WATCHER_ENABLED.getKey(), true)
                .build();
    }

    public void testAllVersionsTested() throws Exception {
        SortedSet<String> expectedVersions = new TreeSet<>();
        for (Version v : VersionUtils.allVersions()) {
            if (v.before(Version.V_2_0_0)) continue; // unsupported indexes
            if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself
            if (v.isBeta() == true || v.isAlpha() == true || v.isRC() == true) continue; // don't check alphas etc
            expectedVersions.add("x-pack-" + v.toString() + ".zip");
        }
        for (String index : dataFiles) {
            if (expectedVersions.remove(index) == false) {
                logger.warn("Old indexes tests contain extra index: {}", index);
            }
        }
        if (expectedVersions.isEmpty() == false) {
            StringBuilder msg = new StringBuilder("Old index tests are missing indexes:");
            for (String expected : expectedVersions) {
                msg.append("\n" + expected);
            }
            fail(msg.toString());
        }
    }

    @Override
    public void testOldIndexes() throws Exception {
        super.testOldIndexes();
        // Wait for watcher to fully start before shutting down
        assertBusy(() -> {
            assertEquals(WatcherState.STARTED, internalCluster().getInstance(WatcherService.class).state());
        });
        // Shutdown watcher on the last node so that the test can shutdown cleanly
        internalCluster().getInstance(WatcherLifeCycleService.class).stop();
    }

    @Override
    protected void checkVersion(Version version) throws Exception {
        // Wait for watcher to actually start....
        assertBusy(() -> {
            assertEquals(WatcherState.STARTED, internalCluster().getInstance(WatcherService.class).state());
        });
        assertWatchIndexContentsWork(version);
        assertBasicWatchInteractions();
    }

    void assertWatchIndexContentsWork(Version version) throws Exception {
        WatcherClient watcherClient = new WatcherClient(client());

        // Fetch a basic watch
        GetWatchResponse bwcWatch = watcherClient.prepareGetWatch("bwc_watch").get();
        assertTrue(bwcWatch.isFound());
        assertNotNull(bwcWatch.getSource());
        Map<String, Object> source = bwcWatch.getSource().getAsMap();
        Map<?, ?> actions = (Map<?, ?>) source.get("actions");
        Map<?, ?> indexPayload = (Map<?, ?>) actions.get("index_payload");
        Map<?, ?> index = (Map<?, ?>) indexPayload.get("index");
        assertEquals("bwc_watch_index", index.get("index"));
        assertEquals("bwc_watch_type", index.get("doc_type"));

        if (version.onOrAfter(Version.V_2_3_0)) {
            /* Fetch a watch with a funny timeout to verify loading fractional time values. This watch is only built in >= 2.3 because
             * email attachments aren't supported before that. */
            bwcWatch = watcherClient.prepareGetWatch("bwc_funny_timeout").get();
            assertTrue(bwcWatch.isFound());
            assertNotNull(bwcWatch.getSource());
            source = bwcWatch.getSource().getAsMap();
            actions = (Map<?, ?>) source.get("actions");
            Map<?, ?> work = (Map<?, ?>) actions.get("work");
            Map<?, ?> email = (Map<?, ?>) work.get("email");
            Map<?, ?> attachments = (Map<?, ?>) email.get("attachments");
            Map<?, ?> attachment = (Map<?, ?>) attachments.get("test_report.pdf");
            Map<?, ?> http = (Map<?, ?>) attachment.get("http");
            Map<?, ?> request = (Map<?, ?>) http.get("request");
            assertEquals(96000, request.get("read_timeout_millis"));
            assertEquals("https", request.get("scheme"));
            assertEquals("example.com", request.get("host"));
            assertEquals("{{ctx.metadata.report_url}}", request.get("path"));
            assertEquals(8443, request.get("port"));
            Map<?, ?> auth = (Map<?, ?>) request.get("auth");
            Map<?, ?> basic = (Map<?, ?>) auth.get("basic");
            assertThat(basic, hasEntry("username", "Aladdin"));
            // password doesn't come back because it is hidden
            assertThat(basic, not(hasKey("password")));
        }

        SearchResponse history = client().prepareSearch(".watch_history*").get();
        assertThat(history.getHits().totalHits(), greaterThanOrEqualTo(10L));
    }

    void assertBasicWatchInteractions() throws Exception {
        WatcherClient watcherClient = new WatcherClient(client());

        PutWatchResponse put = watcherClient.preparePutWatch("new_watch").setSource(new WatchSourceBuilder()
                .condition(AlwaysCondition.INSTANCE)
                .trigger(ScheduleTrigger.builder(new IntervalSchedule(Interval.seconds(1))))
                .addAction("awesome", LoggingAction.builder(new TextTemplate("test")))).get();
        assertTrue(put.isCreated());
        assertEquals(1, put.getVersion());

        put = watcherClient.preparePutWatch("new_watch").setSource(new WatchSourceBuilder()
                .condition(AlwaysCondition.INSTANCE)
                .trigger(ScheduleTrigger.builder(new IntervalSchedule(Interval.seconds(1))))
                .addAction("awesome", LoggingAction.builder(new TextTemplate("test")))).get();
        assertFalse(put.isCreated());
        assertEquals(2, put.getVersion());

        GetWatchResponse get = watcherClient.prepareGetWatch(put.getId()).get();
        assertTrue(get.isFound());
        {
            Map<?, ?> source = get.getSource().getAsMap();
            Map<?, ?> actions = (Map<?, ?>) source.get("actions");
            Map<?, ?> awesome = (Map<?, ?>) actions.get("awesome");
            Map<?, ?> logging = (Map<?, ?>) awesome.get("logging");
            assertEquals("info", logging.get("level"));
            assertEquals("test", logging.get("text"));
        }
    }

}

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.support;
|
|||
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
@ -22,6 +23,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
|
|||
import static java.util.concurrent.TimeUnit.MINUTES;
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.xpack.watcher.support.WatcherDateTimeUtils.parseTimeValueSupportingFractional;
|
||||
import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.xContentParser;
|
||||
import static org.hamcrest.CoreMatchers.nullValue;
|
||||
import static org.hamcrest.Matchers.either;
|
||||
|
@ -122,4 +124,56 @@ public class WatcherDateTimeUtilsTests extends ESTestCase {
        TimeValue parsed = WatcherDateTimeUtils.parseTimeValue(parser, "test");
        assertThat(parsed, nullValue());
    }

    public void testParseTimeValueWithFractional() {
        assertEquals("This function exists so 5.x can be compatible with 2.x indices. It should be removed with 6.x", 5,
                Version.CURRENT.major);

        // This code is lifted straight from 2.x's TimeValueTests.java
        assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10 ms", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10ms", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10 MS", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10MS", "test"));

        assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10 s", "test"));
        assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10s", "test"));
        assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10 S", "test"));
        assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10S", "test"));

        assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("0.1s", "test"));

        assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10 m", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10m", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10 M", "test"));
        assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10M", "test"));

        assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10 h", "test"));
        assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10h", "test"));
        assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10 H", "test"));
        assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10H", "test"));

        assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 d", "test"));
        assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10d", "test"));
        assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 D", "test"));
        assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10D", "test"));

        assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 w", "test"));
        assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10w", "test"));
        assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 W", "test"));
        assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10W", "test"));

        // Extra fractional tests just because that is the point
        assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("0.1s", "test"));
        assertEquals(new TimeValue(6, TimeUnit.SECONDS), parseTimeValueSupportingFractional("0.1m", "test"));
        assertEquals(new TimeValue(6, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1h", "test"));
        assertEquals(new TimeValue(144, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1d", "test"));
        assertEquals(new TimeValue(1008, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1w", "test"));

        // And some crazy fractions just for fun
        assertEquals(new TimeValue(1700, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("1.7s", "test"));
        assertEquals(new TimeValue(162, TimeUnit.SECONDS), parseTimeValueSupportingFractional("2.7m", "test"));
        assertEquals(new TimeValue(5988, TimeUnit.MINUTES), parseTimeValueSupportingFractional("99.8h", "test"));
        assertEquals(new TimeValue(1057968, TimeUnit.SECONDS), parseTimeValueSupportingFractional("12.245d", "test"));
        assertEquals(new TimeValue(7258204799L, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("12.001w", "test"));
    }
}

@ -676,9 +676,6 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase
            "\n" +
            "admin:\n" +
            " cluster: [ 'manage' ]\n" +
            "transport_client:\n" +
            " cluster: [ 'transport_client' ]\n" +
            "\n" +
            "monitor:\n" +
            " cluster: [ 'monitor' ]\n"
            ;