Merge remote-tracking branch 'elastic/master' into feature/sql

Original commit: elastic/x-pack-elasticsearch@79f79ea1c2
This commit is contained in:
Igor Motov 2017-11-13 15:09:35 -05:00
commit 774f423d9e
25 changed files with 1140 additions and 229 deletions

View File

@@ -10,3 +10,5 @@ password-protect your data as well as implement more advanced security measures
such as encrypting communications, role-based access control, IP filtering, and
auditing. For more information, see
{xpack-ref}/xpack-security.html[Securing the Elastic Stack].
include::securing-communications/configuring-tls-docker.asciidoc[]

View File

@@ -0,0 +1,185 @@
[role="xpack"]
[[configuring-tls-docker]]
=== Encrypting Communications in an {es} Docker Image
Starting with version 6.0.0, {security} (Gold, Platinum or Enterprise subscriptions) https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[requires SSL/TLS]
encryption for the transport networking layer.
This section demonstrates an easy path to get started with SSL/TLS for both
HTTPS and transport using the `elasticsearch-platinum` Docker image.
For further details, please refer to
{xpack-ref}/encrypting-communications.html[Encrypting Communications] and
https://www.elastic.co/subscriptions[available subscriptions].
[float]
==== Prepare the environment
<<docker,Install {es} with Docker>>.
Inside a new, empty directory, create the following **four files**:
`instances.yml`:
["source","yaml"]
----
instances:
  - name: es01
    dns:
      - es01 <1>
      - localhost
    ip:
      - 127.0.0.1
  - name: es02
    dns:
      - es02
      - localhost
    ip:
      - 127.0.0.1
----
<1> Allow use of embedded Docker DNS server names.
`.env`:
[source,yaml]
----
CERTS_DIR=/usr/share/elasticsearch/config/x-pack/certificates <1>
ELASTIC_PASSWORD=PleaseChangeMe <2>
----
<1> The path, inside the Docker image, where certificates are expected to be found.
<2> Initial password for the `elastic` user.
[[getting-starter-tls-create-certs-composefile]]
`create-certs.yml`:
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of {es} has not yet been released, so a
`create-certs.yml` is not available for this version.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","yaml",subs="attributes"]
----
version: '2.2'
services:
  create_certs:
    container_name: create_certs
    image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version}
    command: >
      bash -c '
        if [[ ! -d config/x-pack/certificates/certs ]]; then
          mkdir config/x-pack/certificates/certs;
        fi;
        if [[ ! -f /local/certs/bundle.zip ]]; then
          bin/x-pack/certgen --silent --in config/x-pack/certificates/instances.yml --out config/x-pack/certificates/certs/bundle.zip;
          unzip config/x-pack/certificates/certs/bundle.zip -d config/x-pack/certificates/certs; <1>
        fi;
        chgrp -R 0 config/x-pack/certificates/certs
      '
    user: $\{UID:-1000\}
    working_dir: /usr/share/elasticsearch
    volumes: ['.:/usr/share/elasticsearch/config/x-pack/certificates']
----
<1> The new node certificates and CA certificate+key are placed under the local directory `certs`.
endif::[]
[[getting-starter-tls-create-docker-compose]]
`docker-compose.yml`:
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of {es} has not yet been released, so a
`docker-compose.yml` is not available for this version.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","yaml",subs="attributes"]
----
version: '2.2'
services:
  es01:
    container_name: es01
    image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version}
    environment:
      - node.name=es01
      - discovery.zen.minimum_master_nodes=2
      - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1>
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.http.ssl.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate <2>
      - xpack.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.ssl.certificate=$CERTS_DIR/es01/es01.crt
      - xpack.ssl.key=$CERTS_DIR/es01/es01.key
    volumes: ['esdata_01:/usr/share/elasticsearch/data', './certs:$CERTS_DIR']
    ports:
      - 9200:9200
    healthcheck:
      test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
      interval: 30s
      timeout: 10s
      retries: 5
  es02:
    container_name: es02
    image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version}
    environment:
      - node.name=es02
      - discovery.zen.minimum_master_nodes=2
      - ELASTIC_PASSWORD=$ELASTIC_PASSWORD
      - discovery.zen.ping.unicast.hosts=es01
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.http.ssl.enabled=true
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.ssl.certificate=$CERTS_DIR/es02/es02.crt
      - xpack.ssl.key=$CERTS_DIR/es02/es02.key
    volumes: ['esdata_02:/usr/share/elasticsearch/data', './certs:$CERTS_DIR']
  wait_until_ready:
    image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version}
    command: /usr/bin/true
    depends_on: {"es01": {"condition": "service_healthy"}}
volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}}
----
<1> Bootstrap `elastic` with the password defined in `.env`. See {xpack-ref}/setting-up-authentication.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password].
<2> Disable hostname verification for inter-node communication. Certificates are
still validated against the CA, but this allows using them without having to pin
specific internal IP addresses or DNS names in each certificate.
endif::[]
[float]
==== Run the example
. Generate the certificates (only needed once):
+
--
["source","sh"]
----
docker-compose -f create-certs.yml up
----
--
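+
--
Optionally, you can confirm that a generated node certificate carries the DNS
names declared in `instances.yml` (this check assumes `openssl` is available on
the host):
["source","sh"]
----
openssl x509 -in certs/es01/es01.crt -noout -text | grep -A1 'Subject Alternative Name'
----
--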
. Start two {es} nodes configured for SSL/TLS:
+
--
["source","sh"]
----
docker-compose up -d
----
--
. Access the {es} API over SSL/TLS using the bootstrapped password:
+
--
["source","sh"]
----
curl --cacert certs/ca/ca.crt -u elastic:PleaseChangeMe https://localhost:9200
----
// NOTCONSOLE
--
. The `setup-passwords` tool can also be used to generate random passwords for
all users:
+
--
["source","sh"]
----
docker exec es01 /bin/bash -c "bin/x-pack/setup-passwords auto --batch -Expack.ssl.certificate=x-pack/certificates/es01/es01.crt -Expack.ssl.certificate_authorities=x-pack/certificates/ca/ca.crt -Expack.ssl.key=x-pack/certificates/es01/es01.key --url https://localhost:9200"
----
--
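+
--
You can then log in with one of the generated passwords, for example (the
password shown is a placeholder for the value printed by `setup-passwords`):
["source","sh"]
----
curl --cacert certs/ca/ca.crt -u elastic:<generated password> https://localhost:9200/_cat/health
----
// NOTCONSOLE
--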

View File

@@ -0,0 +1,400 @@
[role="xpack"]
[[docker]]
=== Install {es} with Docker
{es} is also available as Docker images.
The images use https://hub.docker.com/_/centos/[centos:7] as the base image and
are available with {xpack-ref}/xpack-introduction.html[X-Pack].
A list of all published Docker images and tags can be found at
https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found
on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub].
==== Image types
The images are available in three different configurations or "flavors". The
`basic` flavor, which is the default, ships with {xpack} Basic features
pre-installed and automatically activated with a free license. The `platinum`
flavor features all {xpack} functionality under a 30-day trial license. The
`oss` flavor does not include {xpack}, and contains only open-source {es}.
NOTE: {xpack-ref}/xpack-security.html[X-Pack Security] is enabled in the `platinum`
image. To access your cluster, it's necessary to set an initial password for the
`elastic` user. The initial password can be set at startup time via the
`ELASTIC_PASSWORD` environment variable:
["source","txt",subs="attributes"]
--------------------------------------------
docker run -e ELASTIC_PASSWORD=MagicWord {docker-repo}-platinum:{version}
--------------------------------------------
NOTE: The `platinum` image includes a trial license for 30 days. After that, you
can obtain one of the https://www.elastic.co/subscriptions[available
subscriptions] or revert to a Basic license. The Basic license is free and
includes a selection of {xpack} features.
Obtaining {es} for Docker is as simple as issuing a +docker pull+ command
against the Elastic Docker registry.
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of {es} has not yet been released, so no
Docker image is currently available for this version.
endif::[]
ifeval::["{release-state}"!="unreleased"]
Docker images can be retrieved with the following commands:
["source","sh",subs="attributes"]
--------------------------------------------
docker pull {docker-repo}:{version}
docker pull {docker-repo}-platinum:{version}
docker pull {docker-repo}-oss:{version}
--------------------------------------------
endif::[]
[[docker-cli-run]]
==== Running {es} from the command line
[[docker-cli-run-dev-mode]]
===== Development mode
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of the {es} Docker image has not yet been released.
endif::[]
ifeval::["{release-state}"!="unreleased"]
{es} can be quickly started for development or testing use with the following command:
["source","sh",subs="attributes"]
--------------------------------------------
docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" {docker-image}
--------------------------------------------
endif::[]
[[docker-cli-run-prod-mode]]
===== Production mode
[[docker-prod-prerequisites]]
[IMPORTANT]
=========================
The `vm.max_map_count` kernel setting needs to be set to at least `262144` for
production use. Depending on your platform:
* Linux
+
--
The `vm.max_map_count` setting should be set permanently in `/etc/sysctl.conf`:
[source,sh]
--------------------------------------------
$ grep vm.max_map_count /etc/sysctl.conf
vm.max_map_count=262144
--------------------------------------------
To apply the setting on a live system, type: `sysctl -w vm.max_map_count=262144`
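To check the currently applied value:
[source,sh]
--------------------------------------------
sysctl vm.max_map_count
--------------------------------------------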
--
* macOS with https://docs.docker.com/engine/installation/mac/#/docker-for-mac[Docker for Mac]
+
--
The `vm.max_map_count` setting must be set within the xhyve virtual machine:
["source","sh"]
--------------------------------------------
$ screen ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/tty
--------------------------------------------
Log in with 'root' and no password.
Then configure the `sysctl` setting as you would for Linux:
["source","sh"]
--------------------------------------------
sysctl -w vm.max_map_count=262144
--------------------------------------------
--
* Windows and macOS with https://www.docker.com/products/docker-toolbox[Docker Toolbox]
+
--
The `vm.max_map_count` setting must be set via docker-machine:
["source","txt"]
--------------------------------------------
docker-machine ssh
sudo sysctl -w vm.max_map_count=262144
--------------------------------------------
--
=========================
The following example brings up a cluster comprising two {es} nodes.
To bring up the cluster, use the
<<docker-prod-cluster-composefile,`docker-compose.yml`>> and just type:
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of {es} has not yet been released, so a
`docker-compose.yml` is not available for this version.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","sh"]
--------------------------------------------
docker-compose up
--------------------------------------------
endif::[]
[NOTE]
`docker-compose` is not pre-installed with Docker on Linux.
Instructions for installing it can be found on the
https://docs.docker.com/compose/install/#install-using-pip[Docker Compose webpage].
The node `elasticsearch` listens on `localhost:9200` while `elasticsearch2`
talks to `elasticsearch` over a Docker network.
This example also uses
https://docs.docker.com/engine/tutorials/dockervolumes[Docker named volumes],
called `esdata1` and `esdata2`, which will be created if not already present.
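For instance, once the cluster is up you can check where Docker placed one of
these named volumes:
["source","sh"]
--------------------------------------------
docker volume inspect esdata1
--------------------------------------------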
[[docker-prod-cluster-composefile]]
`docker-compose.yml`:
ifeval::["{release-state}"=="unreleased"]
WARNING: Version {version} of {es} has not yet been released, so a
`docker-compose.yml` is not available for this version.
endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","yaml",subs="attributes"]
--------------------------------------------
version: '2.2'
services:
  elasticsearch:
    image: {docker-image}
    container_name: elasticsearch
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata1:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - esnet
  elasticsearch2:
    image: {docker-image}
    container_name: elasticsearch2
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "discovery.zen.ping.unicast.hosts=elasticsearch"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata2:/usr/share/elasticsearch/data
    networks:
      - esnet
volumes:
  esdata1:
    driver: local
  esdata2:
    driver: local
networks:
  esnet:
--------------------------------------------
endif::[]
To stop the cluster, type `docker-compose down`. Data volumes will persist,
so it's possible to start the cluster again with the same data using
`docker-compose up`.
To destroy the cluster **and the data volumes**, just type
`docker-compose down -v`.
===== Inspect the status of the cluster
["source","txt"]
--------------------------------------------
curl http://127.0.0.1:9200/_cat/health
1472225929 15:38:49 docker-cluster green 2 2 4 2 0 0 0 0 - 100.0%
--------------------------------------------
// NOTCONSOLE
Log messages go to the console and are handled by the configured Docker logging
driver. By default you can access logs with `docker logs`.
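For example, to follow the most recent log lines of the first node from the
compose example above:
["source","sh"]
--------------------------------------------
docker logs --tail 50 -f elasticsearch
--------------------------------------------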
[[docker-configuration-methods]]
==== Configuring {es} with Docker
{es} loads its configuration from files under `/usr/share/elasticsearch/config/`.
These configuration files are documented in <<settings>> and <<jvm-options>>.
The image offers several methods for configuring {es} settings. The
conventional approach is to provide customized files, such as
`elasticsearch.yml`, but it's also possible to use environment variables to set
options:
===== A. Present the parameters via Docker environment variables
For example, to define the cluster name with `docker run` you can pass
`-e "cluster.name=mynewclustername"`. Double quotes are required.
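A minimal single-node sketch combining this with the development-mode command
shown earlier:
["source","sh",subs="attributes"]
--------------------------------------------
docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e "cluster.name=mynewclustername" {docker-image}
--------------------------------------------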
===== B. Bind-mounted configuration
Create your custom config file and mount this over the image's corresponding file.
For example, bind-mounting a `custom_elasticsearch.yml` with `docker run` can be
accomplished with the parameter:
["source","sh"]
--------------------------------------------
-v full_path_to/custom_elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
--------------------------------------------
IMPORTANT: The container **runs {es} as user `elasticsearch` using
uid:gid `1000:1000`**. Bind-mounted host directories and files, such as
`custom_elasticsearch.yml` above, **need to be accessible by this user**. For the https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#path-settings[data and log dirs],
such as `/usr/share/elasticsearch/data`, write access is required as well.
Also see note 1 below.
===== C. Customized image
In some environments, it may make more sense to prepare a custom image containing
your configuration. A `Dockerfile` to achieve this may be as simple as:
["source","sh",subs="attributes"]
--------------------------------------------
FROM docker.elastic.co/elasticsearch/elasticsearch:{version}
COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/
--------------------------------------------
You could then build and try the image with something like:
["source","sh"]
--------------------------------------------
docker build --tag=elasticsearch-custom .
docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom
--------------------------------------------
===== D. Override the image's default https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options[CMD]
Options can be passed as command-line options to the {es} process by
overriding the default command for the image. For example:
["source","sh"]
--------------------------------------------
docker run <various parameters> bin/elasticsearch -Ecluster.name=mynewclustername
--------------------------------------------
[[next-getting-started-tls-docker]]
==== Configuring SSL/TLS with the {es} Docker image
See <<configuring-tls-docker>>.
==== Notes for production use and defaults
We have collected a number of best practices for production use.
Any Docker parameters mentioned below assume the use of `docker run`.
. By default, {es} runs inside the container as user `elasticsearch` using
uid:gid `1000:1000`.
+
--
CAUTION: One exception is https://docs.openshift.com/container-platform/3.6/creating_images/guidelines.html#openshift-specific-guidelines[OpenShift],
which runs containers using an arbitrarily assigned user ID. OpenShift will
present persistent volumes with the gid set to `0`, which will work without any
adjustments.
If you are bind-mounting a local directory or file, ensure it is readable by
this user, while the <<path-settings,data and log dirs>> additionally require
write access. A good strategy is to grant group access to gid `1000` or `0` for
the local directory. As an example, to prepare a local directory for storing
data through a bind-mount:
["source","sh"]
--------------------------------------------
mkdir esdatadir
chmod g+rwx esdatadir
chgrp 1000 esdatadir
--------------------------------------------
As a last resort, you can also force the container to mutate the ownership of
any bind-mounts used for the <<path-settings,data and log dirs>> through the
environment variable `TAKE_FILE_OWNERSHIP`. In this case, they will be owned by
uid:gid `1000:0`, providing read/write access to the {es} process as required.
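A sketch of such a run, assuming the local `esdatadir` directory created above
and assuming any non-empty value of the variable enables the behavior:
["source","sh",subs="attributes"]
--------------------------------------------
docker run -d -e TAKE_FILE_OWNERSHIP=true -v "$PWD/esdatadir":/usr/share/elasticsearch/data {docker-image}
--------------------------------------------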
--
. It is important to ensure increased ulimits for
<<setting-system-settings,nofile>> and <<max-number-threads-check,nproc>> are
available for the {es} containers. Verify the https://github.com/moby/moby/tree/ea4d1243953e6b652082305a9c3cda8656edab26/contrib/init[init system]
for the Docker daemon is already setting those to acceptable values and, if
needed, adjust them in the daemon, or override them per container, for example
using `docker run`:
+
--
["source","sh"]
--------------------------------------------
--ulimit nofile=65536:65536
--------------------------------------------
NOTE: One way of checking the Docker daemon defaults for the aforementioned
ulimits is by running:
["source","sh"]
--------------------------------------------
docker run --rm centos:7 /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
--------------------------------------------
--
. Swapping needs to be disabled for performance and node stability. This can be
achieved through any of the methods mentioned in the
<<setup-configuration-memory,{es} docs>>. If you opt for the
`bootstrap.memory_lock: true` approach, apart from defining it through any of
the <<docker-configuration-methods,configuration methods>>, you will
additionally need the `memlock: true` ulimit, either defined in the
https://docs.docker.com/engine/reference/commandline/dockerd/#default-ulimits[Docker Daemon]
or specifically set for the container. This is demonstrated above in the
<<docker-prod-cluster-composefile,docker-compose.yml>>. If using `docker run`:
+
--
["source","sh"]
--------------------------------------------
-e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1
--------------------------------------------
--
. The image https://docs.docker.com/engine/reference/builder/#/expose[exposes]
TCP ports 9200 and 9300. For clusters it is recommended to randomize the
published ports with `--publish-all`, unless you are pinning one container per host.
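+
--
For example (a sketch; the randomly published host ports can then be looked up
with `docker port`):
["source","sh",subs="attributes"]
--------------------------------------------
docker run -d --publish-all {docker-image}
docker port <container id> 9200
--------------------------------------------
--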
. Use the `ES_JAVA_OPTS` environment variable to set heap size. For example, to
use 16GB, use `-e ES_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`.
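+
--
As a full command, this might look like:
["source","sh",subs="attributes"]
--------------------------------------------
docker run -e ES_JAVA_OPTS="-Xms16g -Xmx16g" {docker-image}
--------------------------------------------
--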
. Pin your deployments to a specific version of the {es} Docker image, for
example +docker.elastic.co/elasticsearch/elasticsearch:{version}+.
. Always use a volume bound on `/usr/share/elasticsearch/data`, as shown in the
<<docker-cli-run-prod-mode,production example>>, for the following reasons:
.. The data of your {es} node won't be lost if the container is killed
.. {es} is I/O sensitive and the Docker storage driver is not ideal for fast I/O
.. It allows the use of advanced
https://docs.docker.com/engine/extend/plugins/#volume-plugins[Docker volume plugins]
. If you are using the devicemapper storage driver, make sure you are not using
the default `loop-lvm` mode. Configure docker-engine to use
https://docs.docker.com/engine/userguide/storagedriver/device-mapper-driver/#configure-docker-with-devicemapper[direct-lvm]
instead.
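+
--
You can check which storage driver is currently in use with:
["source","sh"]
--------------------------------------------
docker info | grep 'Storage Driver'
--------------------------------------------
--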
. Consider centralizing your logs by using a different
https://docs.docker.com/engine/admin/logging/overview/[logging driver]. Also
note that the default json-file logging driver is not ideally suited for
production use.
include::next-steps.asciidoc[]

View File

@@ -0,0 +1,10 @@
[role="exclude"]
==== Next steps
You now have a test {es} environment set up. Before you start
serious development or go into production with {es}, you must do some additional
setup:
* Learn how to <<settings,configure Elasticsearch>>.
* Configure <<important-settings,important Elasticsearch settings>>.
* Configure <<system-config,important system settings>>.

View File

@@ -333,7 +333,7 @@ configurations {
testArtifacts.extendsFrom testRuntime
}
task testJar(type: Jar) {
classifier "test"
appendix 'test'
from sourceSets.test.output
}
artifacts {

View File

@@ -18,6 +18,7 @@ import org.elasticsearch.xpack.ml.job.results.Bucket;
import org.elasticsearch.xpack.ml.job.results.BucketInfluencer;
import org.elasticsearch.xpack.ml.job.results.CategoryDefinition;
import org.elasticsearch.xpack.ml.job.results.Forecast;
import org.elasticsearch.xpack.ml.job.results.ForecastRequestStats;
import org.elasticsearch.xpack.ml.job.results.Influence;
import org.elasticsearch.xpack.ml.job.results.Influencer;
import org.elasticsearch.xpack.ml.job.results.ModelPlot;
@@ -337,8 +338,25 @@ public class ElasticsearchMappings {
.startObject(Forecast.FORECAST_ID.getPreferredName())
.field(TYPE, LONG)
.endObject();
}
// Forecast Stats Output
// re-used: PROCESSING_TIME_MS, PROCESSED_RECORD_COUNT, LATEST_RECORD_TIME
builder.startObject(ForecastRequestStats.START_TIME.getPreferredName())
.field(TYPE, DATE)
.endObject()
.startObject(ForecastRequestStats.END_TIME.getPreferredName())
.field(TYPE, DATE)
.endObject()
.startObject(ForecastRequestStats.MESSAGE.getPreferredName())
.field(TYPE, KEYWORD)
.endObject()
.startObject(ForecastRequestStats.PROGRESS.getPreferredName())
.field(TYPE, DOUBLE)
.endObject()
.startObject(ForecastRequestStats.STATUS.getPreferredName())
.field(TYPE, KEYWORD)
.endObject();
}
/**
* AnomalyRecord fields to be added under the 'properties' section of the mapping

View File

@@ -30,6 +30,7 @@ import org.elasticsearch.xpack.ml.job.results.Bucket;
import org.elasticsearch.xpack.ml.job.results.BucketInfluencer;
import org.elasticsearch.xpack.ml.job.results.CategoryDefinition;
import org.elasticsearch.xpack.ml.job.results.Forecast;
import org.elasticsearch.xpack.ml.job.results.ForecastRequestStats;
import org.elasticsearch.xpack.ml.job.results.Influencer;
import org.elasticsearch.xpack.ml.job.results.ModelPlot;
@@ -158,6 +159,13 @@ public class JobResultsPersister extends AbstractComponent {
return this;
}
public Builder persistForecastRequestStats(ForecastRequestStats forecastRequestStats) {
logger.trace("[{}] ES BULK ACTION: index forecast request stats to index [{}] with ID [{}]", jobId, indexName,
forecastRequestStats.getId());
indexResult(forecastRequestStats.getId(), forecastRequestStats, Forecast.RESULT_TYPE_VALUE);
return this;
}
private void indexResult(String id, ToXContent resultDoc, String resultType) {
try (XContentBuilder content = toXContentBuilder(resultDoc)) {
bulkRequest.add(new IndexRequest(indexName, DOC_TYPE, id).source(content));

View File

@@ -26,7 +26,7 @@ import org.elasticsearch.xpack.ml.job.results.AutodetectResult;
import org.elasticsearch.xpack.ml.job.results.Bucket;
import org.elasticsearch.xpack.ml.job.results.CategoryDefinition;
import org.elasticsearch.xpack.ml.job.results.Forecast;
import org.elasticsearch.xpack.ml.job.results.ForecastStats;
import org.elasticsearch.xpack.ml.job.results.ForecastRequestStats;
import org.elasticsearch.xpack.ml.job.results.Influencer;
import org.elasticsearch.xpack.ml.job.results.ModelPlot;
@@ -195,15 +195,20 @@ public class AutoDetectResultProcessor {
if (forecast != null) {
context.bulkResultsPersister.persistForecast(forecast);
}
ForecastStats forecastStats = result.getForecastStats();
if (forecastStats != null) {
// forecast stats are send by autodetect but do not get persisted,
// still they mark the end of a forecast
ForecastRequestStats forecastRequestStats = result.getForecastRequestStats();
if (forecastRequestStats != null) {
LOGGER.trace("Received Forecast Stats [{}]", forecastRequestStats.getId());
context.bulkResultsPersister.persistForecastRequestStats(forecastRequestStats);
LOGGER.trace("Received Forecast Stats [{}]", forecastStats.getId());
double forecastProgress = forecastRequestStats.getProgress();
// forecast stats mark the end of a forecast, therefore commit whatever we have
context.bulkResultsPersister.executeRequest();
// persist if progress is 0 (probably some error condition) or 1 (finished),
// otherwise rely on the count-based trigger
if (forecastProgress == 0.0 || forecastProgress >= 1.0) {
// if forecast stats progress is 1.0 it marks the end of a forecast,
// therefore commit whatever we have
context.bulkResultsPersister.executeRequest();
}
}
ModelSizeStats modelSizeStats = result.getModelSizeStats();
if (modelSizeStats != null) {

View File

@@ -205,6 +205,12 @@ public class CppLogMessageHandler implements Closeable {
parseMessage(xContent, bytesRef.slice(from, nextMarker - from));
}
from = nextMarker + 1;
if (from < bytesRef.length() && bytesRef.get(from) == (byte) 0) {
// This is to work around the problem of log4cxx on Windows
// outputting UTF-16 instead of UTF-8. For full details see
// https://github.com/elastic/machine-learning-cpp/issues/385
++from;
}
}
if (from >= bytesRef.length()) {
return null;

View File

@@ -31,8 +31,8 @@ public class AutodetectResult implements ToXContentObject, Writeable {
public static final ConstructingObjectParser<AutodetectResult, Void> PARSER = new ConstructingObjectParser<>(
TYPE.getPreferredName(), a -> new AutodetectResult((Bucket) a[0], (List<AnomalyRecord>) a[1], (List<Influencer>) a[2],
(Quantiles) a[3], a[4] == null ? null : ((ModelSnapshot.Builder) a[4]).build(),
a[5] == null ? null : ((ModelSizeStats.Builder) a[5]).build(),
(ModelPlot) a[6], (Forecast) a[7], (ForecastStats) a[8], (CategoryDefinition) a[9], (FlushAcknowledgement) a[10]));
a[5] == null ? null : ((ModelSizeStats.Builder) a[5]).build(), (ModelPlot) a[6],
(Forecast) a[7], (ForecastRequestStats) a[8], (CategoryDefinition) a[9], (FlushAcknowledgement) a[10]));
static {
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Bucket.PARSER, Bucket.RESULT_TYPE_FIELD);
@@ -44,7 +44,8 @@ public class AutodetectResult implements ToXContentObject, Writeable {
ModelSizeStats.RESULT_TYPE_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelPlot.PARSER, ModelPlot.RESULTS_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Forecast.PARSER, Forecast.RESULTS_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastStats.PARSER, ForecastStats.RESULTS_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastRequestStats.PARSER,
ForecastRequestStats.RESULTS_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), CategoryDefinition.PARSER, CategoryDefinition.TYPE);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), FlushAcknowledgement.PARSER, FlushAcknowledgement.TYPE);
}
@@ -57,13 +58,13 @@ public class AutodetectResult implements ToXContentObject, Writeable {
private final ModelSizeStats modelSizeStats;
private final ModelPlot modelPlot;
private final Forecast forecast;
private final ForecastStats forecastStats;
private final ForecastRequestStats forecastRequestStats;
private final CategoryDefinition categoryDefinition;
private final FlushAcknowledgement flushAcknowledgement;
public AutodetectResult(Bucket bucket, List<AnomalyRecord> records, List<Influencer> influencers, Quantiles quantiles,
ModelSnapshot modelSnapshot, ModelSizeStats modelSizeStats, ModelPlot modelPlot, Forecast forecast,
ForecastStats forecastStats, CategoryDefinition categoryDefinition, FlushAcknowledgement flushAcknowledgement) {
ForecastRequestStats forecastRequestStats, CategoryDefinition categoryDefinition, FlushAcknowledgement flushAcknowledgement) {
this.bucket = bucket;
this.records = records;
this.influencers = influencers;
@@ -72,7 +73,7 @@ public class AutodetectResult implements ToXContentObject, Writeable {
this.modelSizeStats = modelSizeStats;
this.modelPlot = modelPlot;
this.forecast = forecast;
this.forecastStats = forecastStats;
this.forecastRequestStats = forecastRequestStats;
this.categoryDefinition = categoryDefinition;
this.flushAcknowledgement = flushAcknowledgement;
}
@@ -131,13 +132,13 @@ public class AutodetectResult implements ToXContentObject, Writeable {
this.forecast = null;
}
if (in.readBoolean()) {
this.forecastStats = new ForecastStats(in);
this.forecastRequestStats = new ForecastRequestStats(in);
} else {
this.forecastStats = null;
this.forecastRequestStats = null;
}
} else {
this.forecast = null;
this.forecastStats = null;
this.forecastRequestStats = null;
}
}
@@ -155,7 +156,7 @@ public class AutodetectResult implements ToXContentObject, Writeable {
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
writeNullable(forecast, out);
writeNullable(forecastStats, out);
writeNullable(forecastRequestStats, out);
}
}
@@ -186,7 +187,7 @@ public class AutodetectResult implements ToXContentObject, Writeable {
addNullableField(ModelSizeStats.RESULT_TYPE_FIELD, modelSizeStats, builder);
addNullableField(ModelPlot.RESULTS_FIELD, modelPlot, builder);
addNullableField(Forecast.RESULTS_FIELD, forecast, builder);
addNullableField(ForecastStats.RESULTS_FIELD, forecastStats, builder);
addNullableField(ForecastRequestStats.RESULTS_FIELD, forecastRequestStats, builder);
addNullableField(CategoryDefinition.TYPE, categoryDefinition, builder);
addNullableField(FlushAcknowledgement.TYPE, flushAcknowledgement, builder);
builder.endObject();
@@ -237,8 +238,8 @@ public class AutodetectResult implements ToXContentObject, Writeable {
return forecast;
}
public ForecastStats getForecastStats() {
return forecastStats;
public ForecastRequestStats getForecastRequestStats() {
return forecastRequestStats;
}
public CategoryDefinition getCategoryDefinition() {
@@ -251,8 +252,8 @@ public class AutodetectResult implements ToXContentObject, Writeable {
@Override
public int hashCode() {
return Objects.hash(bucket, records, influencers, categoryDefinition, flushAcknowledgement, modelPlot, forecast, forecastStats,
modelSizeStats, modelSnapshot, quantiles);
return Objects.hash(bucket, records, influencers, categoryDefinition, flushAcknowledgement, modelPlot, forecast,
forecastRequestStats, modelSizeStats, modelSnapshot, quantiles);
}
@Override
@@ -271,7 +272,7 @@ public class AutodetectResult implements ToXContentObject, Writeable {
Objects.equals(flushAcknowledgement, other.flushAcknowledgement) &&
Objects.equals(modelPlot, other.modelPlot) &&
Objects.equals(forecast, other.forecast) &&
Objects.equals(forecastStats, other.forecastStats) &&
Objects.equals(forecastRequestStats, other.forecastRequestStats) &&
Objects.equals(modelSizeStats, other.modelSizeStats) &&
Objects.equals(modelSnapshot, other.modelSnapshot) &&
Objects.equals(quantiles, other.quantiles);

View File

@@ -0,0 +1,252 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.results;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.xpack.ml.job.config.Job;
import java.io.IOException;
import java.time.Instant;
import java.util.Locale;
import java.util.Objects;
/**
* Model ForecastRequestStats POJO.
*
* This information is produced by the autodetect process and contains
* information about errors, progress and counters. There is exactly 1 document
* per forecast request, getting updated while the request is processed.
*/
public class ForecastRequestStats implements ToXContentObject, Writeable {
/**
* Result type
*/
public static final String RESULT_TYPE_VALUE = "model_forecast_request_stats";
public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE);
public static final ParseField FORECAST_ID = new ParseField("forecast_id");
public static final ParseField START_TIME = new ParseField("forecast_start_timestamp");
public static final ParseField END_TIME = new ParseField("forecast_end_timestamp");
public static final ParseField MESSAGE = new ParseField("forecast_message");
public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms");
public static final ParseField PROGRESS = new ParseField("forecast_progress");
public static final ParseField PROCESSED_RECORD_COUNT = new ParseField("processed_record_count");
public static final ParseField STATUS = new ParseField("forecast_status");
public static final ConstructingObjectParser<ForecastRequestStats, Void> PARSER =
new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new ForecastRequestStats((String) a[0], (long) a[1]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), FORECAST_ID);
PARSER.declareString((modelForecastRequestStats, s) -> {}, Result.RESULT_TYPE);
PARSER.declareLong(ForecastRequestStats::setRecordCount, PROCESSED_RECORD_COUNT);
PARSER.declareString(ForecastRequestStats::setMessage, MESSAGE);
PARSER.declareField(ForecastRequestStats::setStartTimeStamp,
p -> Instant.ofEpochMilli(p.longValue()), START_TIME, ValueType.LONG);
PARSER.declareField(ForecastRequestStats::setEndTimeStamp,
p -> Instant.ofEpochMilli(p.longValue()), END_TIME, ValueType.LONG);
PARSER.declareDouble(ForecastRequestStats::setProgress, PROGRESS);
PARSER.declareLong(ForecastRequestStats::setProcessingTime, PROCESSING_TIME_MS);
PARSER.declareField(ForecastRequestStats::setStatus, p -> ForecastRequestStatus.fromString(p.text()), STATUS, ValueType.STRING);
}
public enum ForecastRequestStatus implements Writeable {
OK, FAILED, STOPPED, STARTED, FINISHED, SCHEDULED;
public static ForecastRequestStatus fromString(String statusName) {
return valueOf(statusName.trim().toUpperCase(Locale.ROOT));
}
public static ForecastRequestStatus readFromStream(StreamInput in) throws IOException {
return in.readEnum(ForecastRequestStatus.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(this);
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
private final String jobId;
private final long forecastId;
private long recordCount;
private String message;
private Instant dateStarted = Instant.EPOCH;
private Instant dateEnded = Instant.EPOCH;
private double progress;
private long processingTime;
private ForecastRequestStatus status = ForecastRequestStatus.OK;
public ForecastRequestStats(String jobId, long forecastId) {
this.jobId = jobId;
this.forecastId = forecastId;
}
public ForecastRequestStats(StreamInput in) throws IOException {
jobId = in.readString();
forecastId = in.readLong();
recordCount = in.readLong();
message = in.readOptionalString();
dateStarted = Instant.ofEpochMilli(in.readVLong());
dateEnded = Instant.ofEpochMilli(in.readVLong());
progress = in.readDouble();
processingTime = in.readLong();
status = ForecastRequestStatus.readFromStream(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
out.writeLong(forecastId);
out.writeLong(recordCount);
out.writeOptionalString(message);
out.writeVLong(dateStarted.toEpochMilli());
out.writeVLong(dateEnded.toEpochMilli());
out.writeDouble(progress);
out.writeLong(processingTime);
status.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
builder.field(FORECAST_ID.getPreferredName(), forecastId);
builder.field(PROCESSED_RECORD_COUNT.getPreferredName(), recordCount);
if (message != null) {
builder.field(MESSAGE.getPreferredName(), message);
}
if (dateStarted.equals(Instant.EPOCH) == false) {
builder.field(START_TIME.getPreferredName(), dateStarted.toEpochMilli());
}
if (dateEnded.equals(Instant.EPOCH) == false) {
builder.field(END_TIME.getPreferredName(), dateEnded.toEpochMilli());
}
builder.field(PROGRESS.getPreferredName(), progress);
builder.field(PROCESSING_TIME_MS.getPreferredName(), processingTime);
builder.field(STATUS.getPreferredName(), status);
builder.endObject();
return builder;
}
public String getJobId() {
return jobId;
}
/**
* Return the document ID used for indexing. As there is 1 and only 1 document
* per forecast request, the id has no dynamic parts.
*
* @return id
*/
public String getId() {
return jobId + "_model_forecast_request_stats_" + forecastId;
}
public void setRecordCount(long recordCount) {
this.recordCount = recordCount;
}
public double getRecordCount() {
return recordCount;
}
public String getMessage() {
return message;
}
public void setMessage(String message) {
this.message = message;
}
public Instant getDateStarted() {
return dateStarted;
}
public void setStartTimeStamp(Instant dateStarted) {
this.dateStarted = dateStarted;
}
public Instant getDateEnded() {
return dateEnded;
}
public void setEndTimeStamp(Instant dateEnded) {
this.dateEnded = dateEnded;
}
/**
* Progress information of the ForecastRequest in the range 0 to 1,
* while 1 means finished
*
* @return progress value
*/
public double getProgress() {
return progress;
}
public void setProgress(double progress) {
this.progress = progress;
}
public long getProcessingTime() {
return processingTime;
}
public void setProcessingTime(long processingTime) {
this.processingTime = processingTime;
}
public ForecastRequestStatus getStatus() {
return status;
}
public void setStatus(ForecastRequestStatus jobStatus) {
Objects.requireNonNull(jobStatus, "[" + STATUS.getPreferredName() + "] must not be null");
this.status = jobStatus;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ForecastRequestStats == false) {
return false;
}
ForecastRequestStats that = (ForecastRequestStats) other;
return Objects.equals(this.jobId, that.jobId) &&
this.forecastId == that.forecastId &&
this.recordCount == that.recordCount &&
Objects.equals(this.message, that.message) &&
Objects.equals(this.dateStarted, that.dateStarted) &&
Objects.equals(this.dateEnded, that.dateEnded) &&
this.progress == that.progress &&
this.processingTime == that.processingTime &&
Objects.equals(this.status, that.status);
}
@Override
public int hashCode() {
return Objects.hash(jobId, forecastId, recordCount, message, dateStarted, dateEnded, progress,
processingTime, status);
}
}

View File

@@ -1,114 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.results;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.job.config.Job;
import java.io.IOException;
import java.util.Objects;
/**
* Model ForecastStats POJO.
*
* Note forecast stats are sent from the autodetect process but do not get
* indexed.
*/
public class ForecastStats implements ToXContentObject, Writeable {
/**
* Result type
*/
public static final String RESULT_TYPE_VALUE = "model_forecast_stats";
public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE);
public static final ParseField FORECAST_ID = new ParseField("forecast_id");
public static final ParseField RECORD_COUNT = new ParseField("forecast_record_count");
public static final ConstructingObjectParser<ForecastStats, Void> PARSER =
new ConstructingObjectParser<>(RESULT_TYPE_VALUE, a -> new ForecastStats((String) a[0], (long) a[1]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), FORECAST_ID);
PARSER.declareString((modelForecastStats, s) -> {}, Result.RESULT_TYPE);
PARSER.declareLong(ForecastStats::setRecordCount, RECORD_COUNT);
}
private final String jobId;
private final long forecastId;
private long recordCount;
public ForecastStats(String jobId, long forecastId) {
this.jobId = jobId;
this.forecastId = forecastId;
}
public ForecastStats(StreamInput in) throws IOException {
jobId = in.readString();
forecastId = in.readLong();
recordCount = in.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(jobId);
out.writeLong(forecastId);
out.writeLong(recordCount);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
builder.field(FORECAST_ID.getPreferredName(), forecastId);
builder.field(RECORD_COUNT.getPreferredName(), recordCount);
builder.endObject();
return builder;
}
public String getJobId() {
return jobId;
}
public String getId() {
return jobId + "_model_forecast_stats";
}
public void setRecordCount(long recordCount) {
this.recordCount = recordCount;
}
public double getRecordCount() {
return recordCount;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof ForecastStats == false) {
return false;
}
ForecastStats that = (ForecastStats) other;
return Objects.equals(this.jobId, that.jobId) &&
this.forecastId == that.forecastId &&
this.recordCount == that.recordCount;
}
@Override
public int hashCode() {
return Objects.hash(jobId, forecastId, recordCount);
}
}

View File

@@ -131,6 +131,12 @@ public final class ReservedFieldNames {
Forecast.FORECAST_PREDICTION.getPreferredName(),
Forecast.FORECAST_ID.getPreferredName(),
ForecastRequestStats.START_TIME.getPreferredName(),
ForecastRequestStats.END_TIME.getPreferredName(),
ForecastRequestStats.MESSAGE.getPreferredName(),
ForecastRequestStats.PROGRESS.getPreferredName(),
ForecastRequestStats.STATUS.getPreferredName(),
ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName(),
ModelSizeStats.TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(),
ModelSizeStats.TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(),

View File

@@ -10,13 +10,17 @@ import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.license.License;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.license.LicenseUtils;
@@ -29,6 +33,7 @@ import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.xpack.XPackSettings.SECURITY_ENABLED;
import static org.elasticsearch.xpack.XPackSettings.TRANSPORT_SSL_ENABLED;
@@ -50,6 +55,7 @@ public class ClusterStatsCollector extends Collector {
*/
public static final Setting<TimeValue> CLUSTER_STATS_TIMEOUT = collectionTimeoutSetting("cluster.stats.timeout");
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final LicenseService licenseService;
private final Client client;
@@ -58,9 +64,20 @@ public class ClusterStatsCollector extends Collector {
final XPackLicenseState licenseState,
final Client client,
final LicenseService licenseService) {
this(settings, clusterService, licenseState, client, licenseService, new IndexNameExpressionResolver(Settings.EMPTY));
}
ClusterStatsCollector(final Settings settings,
final ClusterService clusterService,
final XPackLicenseState licenseState,
final Client client,
final LicenseService licenseService,
final IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClusterStatsMonitoringDoc.TYPE, clusterService, CLUSTER_STATS_TIMEOUT, licenseState);
this.client = client;
this.licenseService = licenseService;
this.indexNameExpressionResolver = Objects.requireNonNull(indexNameExpressionResolver);
}
@Override
@@ -82,7 +99,8 @@ public class ClusterStatsCollector extends Collector {
final String version = Version.CURRENT.toString();
final ClusterState clusterState = clusterService.state();
final License license = licenseService.getLicense();
final List<XPackFeatureSet.Usage> usage = collect(usageSupplier);
final List<XPackFeatureSet.Usage> xpackUsage = collect(usageSupplier);
final boolean apmIndicesExist = doAPMIndicesExist(clusterState);
// if they have any other type of license, then they are either okay or already know
final boolean clusterNeedsTLSEnabled = license.operationMode() == License.OperationMode.TRIAL &&
SECURITY_ENABLED.get(settings) &&
@@ -91,7 +109,18 @@ public class ClusterStatsCollector extends Collector {
// Adds a cluster stats document
return Collections.singleton(
new ClusterStatsMonitoringDoc(clusterUUID(), timestamp(), interval, node, clusterName, version, clusterStats.getStatus(),
license, usage, clusterStats, clusterState, clusterNeedsTLSEnabled));
license, apmIndicesExist, xpackUsage, clusterStats, clusterState, clusterNeedsTLSEnabled));
}
boolean doAPMIndicesExist(final ClusterState clusterState) {
try {
final Index[] indices =
indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.lenientExpandOpen(), "apm-*");
return indices.length > 0;
} catch (IndexNotFoundException | IllegalArgumentException e) {
return false;
}
}
@Nullable

View File

@@ -51,6 +51,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc {
private final String clusterName;
private final String version;
private final License license;
private final boolean apmIndicesExist;
private final List<XPackFeatureSet.Usage> usages;
private final ClusterStatsResponse clusterStats;
private final ClusterState clusterState;
@@ -65,6 +66,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc {
final String version,
final ClusterHealthStatus status,
@Nullable final License license,
final boolean apmIndicesExist,
@Nullable final List<XPackFeatureSet.Usage> usages,
@Nullable final ClusterStatsResponse clusterStats,
@Nullable final ClusterState clusterState,
@@ -75,6 +77,7 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc {
this.version = Objects.requireNonNull(version);
this.status = Objects.requireNonNull(status);
this.license = license;
this.apmIndicesExist = apmIndicesExist;
this.usages = usages;
this.clusterStats = clusterStats;
this.clusterState = clusterState;
@@ -93,6 +96,10 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc {
return license;
}
boolean getAPMIndicesExist() {
return apmIndicesExist;
}
List<XPackFeatureSet.Usage> getUsages() {
return usages;
}
@@ -120,45 +127,57 @@ public class ClusterStatsMonitoringDoc extends MonitoringDoc {
if (license != null) {
builder.startObject("license");
Map<String, String> extraParams = new MapBuilder<String, String>()
.put(License.REST_VIEW_MODE, "true")
.map();
params = new ToXContent.DelegatingMapParams(extraParams, params);
license.toInnerXContent(builder, params);
builder.field("hkey", hash(license, getCluster()));
if (clusterNeedsTLSEnabled) {
builder.field("cluster_needs_tls", true);
{
Map<String, String> extraParams = new MapBuilder<String, String>()
.put(License.REST_VIEW_MODE, "true")
.map();
params = new ToXContent.DelegatingMapParams(extraParams, params);
license.toInnerXContent(builder, params);
builder.field("hkey", hash(license, getCluster()));
if (clusterNeedsTLSEnabled) {
builder.field("cluster_needs_tls", true);
}
}
builder.endObject();
}
if (clusterStats != null) {
builder.startObject("cluster_stats");
clusterStats.toXContent(builder, params);
{
clusterStats.toXContent(builder, params);
}
builder.endObject();
}
if (clusterState != null) {
builder.startObject("cluster_state");
builder.field("nodes_hash", nodesHash(clusterState.nodes()));
builder.field("status", status.name().toLowerCase(Locale.ROOT));
clusterState.toXContent(builder, CLUSTER_STATS_PARAMS);
{
builder.field("nodes_hash", nodesHash(clusterState.nodes()));
builder.field("status", status.name().toLowerCase(Locale.ROOT));
clusterState.toXContent(builder, CLUSTER_STATS_PARAMS);
}
builder.endObject();
}
if (usages != null) {
// in the future we may choose to add other usages under the stack_stats section, but it is only xpack for now
// it may also be combined on the UI side of phone-home to add things like "kibana" and "logstash" under "stack_stats"
builder.startObject("stack_stats");
builder.startObject("stack_stats");
{
// in the future, it may be useful to pass in an object that represents APM (and others), but for now this
// is good enough
builder.startObject("apm");
{
builder.field("found", apmIndicesExist);
}
builder.endObject();
if (usages != null) {
builder.startObject("xpack");
for (final XPackFeatureSet.Usage usage : usages) {
builder.field(usage.name(), usage);
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
}
/**

View File

@@ -34,7 +34,8 @@ import static org.elasticsearch.xpack.security.Security.setting;
* Instead, realm configuration relies on the <code>validator</code> parameter to
* {@link Setting#groupSetting(String, Consumer, Setting.Property...)} in order to validate each realm in a way that respects the
* declared <code>type</code>.
* Internally, this validation delegates to {@link AbstractScopedSettings#validate(Settings)} so that validation is reasonably aligned
* Internally, this validation delegates to {@link AbstractScopedSettings#validate(Settings, boolean)} so that validation is reasonably
* aligned
* with the way we validate settings globally.
* </p>
* <p>
@@ -172,7 +173,7 @@ public class RealmSettings {
settingSet.add(ORDER_SETTING);
final AbstractScopedSettings validator = new AbstractScopedSettings(settings, settingSet, Setting.Property.NodeScope) { };
try {
validator.validate(settings);
validator.validate(settings, false);
} catch (RuntimeException e) {
throw new IllegalArgumentException("incorrect configuration for realm [" + getFullSettingKey(name, "")
+ "] of type " + type, e);

View File

@@ -37,7 +37,7 @@ grant codeBase "${codebase.elasticsearch-rest-client}" {
permission java.net.NetPermission "getProxySelector";
};
grant codeBase "${codebase.httpasyncclient-4.1.2.jar}" {
grant codeBase "${codebase.httpasyncclient}" {
// rest client uses system properties which gets the default proxy
permission java.net.NetPermission "getProxySelector";
};

View File

@@ -468,7 +468,7 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
assertTrue(indexRoutingTable.allPrimaryShardsActive());
}
}
});
}, 30L, TimeUnit.SECONDS);
}
}
@@ -485,7 +485,7 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
assertTrue(indexRoutingTable.allPrimaryShardsActive());
}
}
}, 20, TimeUnit.SECONDS);
}, 30L, TimeUnit.SECONDS);
}
}

View File

@@ -36,7 +36,7 @@ public class AutodetectResultTests extends AbstractSerializingTestCase<Autodetec
ModelSizeStats.Builder modelSizeStats;
ModelPlot modelPlot;
Forecast forecast;
ForecastStats forecastStats;
ForecastRequestStats forecastRequestStats;
CategoryDefinition categoryDefinition;
FlushAcknowledgement flushAcknowledgement;
String jobId = "foo";
@@ -92,9 +92,9 @@ public class AutodetectResultTests extends AbstractSerializingTestCase<Autodetec
forecast = null;
}
if (randomBoolean()) {
forecastStats = new ForecastStats(jobId, randomNonNegativeLong());
forecastRequestStats = new ForecastRequestStats(jobId, randomNonNegativeLong());
} else {
forecastStats = null;
forecastRequestStats = null;
}
if (randomBoolean()) {
categoryDefinition = new CategoryDefinition(jobId);
@@ -108,7 +108,7 @@ public class AutodetectResultTests extends AbstractSerializingTestCase<Autodetec
flushAcknowledgement = null;
}
return new AutodetectResult(bucket, records, influencers, quantiles, modelSnapshot,
modelSizeStats == null ? null : modelSizeStats.build(), modelPlot, forecast, forecastStats, categoryDefinition,
modelSizeStats == null ? null : modelSizeStats.build(), modelPlot, forecast, forecastRequestStats, categoryDefinition,
flushAcknowledgement);
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.results;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.ml.job.results.ForecastRequestStats.ForecastRequestStatus;
import java.io.IOException;
import java.time.Instant;
public class ForecastRequestStatsTests extends AbstractSerializingTestCase<ForecastRequestStats> {
@Override
protected ForecastRequestStats parseInstance(XContentParser parser) {
return ForecastRequestStats.PARSER.apply(parser, null);
}
@Override
protected ForecastRequestStats createTestInstance() {
return createTestInstance("ForecastRequestStatsTest", randomNonNegativeLong());
}
public ForecastRequestStats createTestInstance(String jobId, long forecastId) {
ForecastRequestStats forecastRequestStats = new ForecastRequestStats(jobId, forecastId);
if (randomBoolean()) {
forecastRequestStats.setRecordCount(randomLong());
}
if (randomBoolean()) {
forecastRequestStats.setMessage(randomAlphaOfLengthBetween(1, 20));
}
if (randomBoolean()) {
forecastRequestStats.setStartTimeStamp(Instant.ofEpochMilli(randomNonNegativeLong()));
}
if (randomBoolean()) {
forecastRequestStats.setEndTimeStamp(Instant.ofEpochMilli(randomNonNegativeLong()));
}
if (randomBoolean()) {
forecastRequestStats.setProgress(randomDouble());
}
if (randomBoolean()) {
forecastRequestStats.setProcessingTime(randomNonNegativeLong());
}
if (randomBoolean()) {
forecastRequestStats.setStatus(randomFrom(ForecastRequestStatus.values()));
}
return forecastRequestStats;
}
@Override
protected Reader<ForecastRequestStats> instanceReader() {
return ForecastRequestStats::new;
}
@Override
protected ForecastRequestStats doParseInstance(XContentParser parser) throws IOException {
return ForecastRequestStats.PARSER.apply(parser, null);
}
}

View File

@@ -1,46 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.results;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import java.io.IOException;
public class ForecastStatsTests extends AbstractSerializingTestCase<ForecastStats> {
@Override
protected ForecastStats parseInstance(XContentParser parser) {
return ForecastStats.PARSER.apply(parser, null);
}
@Override
protected ForecastStats createTestInstance() {
return createTestInstance("ForecastStatsTest", randomNonNegativeLong());
}
public ForecastStats createTestInstance(String jobId, long forecastId) {
ForecastStats forecastStats = new ForecastStats(jobId, forecastId);
if (randomBoolean()) {
forecastStats.setRecordCount(randomLong());
}
return forecastStats;
}
@Override
protected Reader<ForecastStats> instanceReader() {
return ForecastStats::new;
}
@Override
protected ForecastStats doParseInstance(XContentParser parser) throws IOException {
return ForecastStats.PARSER.apply(parser, null);
}
}

View File

@ -11,12 +11,16 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsIndices;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsNodes;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.license.License;
import org.elasticsearch.license.LicenseService;
import org.elasticsearch.xpack.action.XPackUsageAction;
@ -82,6 +86,40 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
verify(nodes).isLocalNodeElectedMaster();
}
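// doAPMIndicesExist should report true exactly when "apm-*" resolves to at least one concrete index.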
public void testDoAPMIndicesExistReturnsBasedOnIndices() {
final boolean apmIndicesExist = randomBoolean();
final Index[] indices = new Index[apmIndicesExist ? randomIntBetween(1, 3) : 0];
final IndexNameExpressionResolver resolver = mock(IndexNameExpressionResolver.class);
when(resolver.concreteIndices(clusterState, IndicesOptions.lenientExpandOpen(), "apm-*")).thenReturn(indices);
final ClusterStatsCollector collector =
new ClusterStatsCollector(Settings.EMPTY, clusterService, licenseState, client, licenseService, resolver);
assertThat(collector.doAPMIndicesExist(clusterState), is(apmIndicesExist));
}
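// IndexNotFoundException and IllegalArgumentException are anticipated failures and map to "no APM indices".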
public void testDoAPMIndicesExistReturnsFalseForExpectedExceptions() {
final Exception exception = randomFrom(new IndexNotFoundException("TEST - expected"), new IllegalArgumentException());
final IndexNameExpressionResolver resolver = mock(IndexNameExpressionResolver.class);
when(resolver.concreteIndices(clusterState, IndicesOptions.lenientExpandOpen(), "apm-*")).thenThrow(exception);
final ClusterStatsCollector collector =
new ClusterStatsCollector(Settings.EMPTY, clusterService, licenseState, client, licenseService, resolver);
assertThat(collector.doAPMIndicesExist(clusterState), is(false));
}
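// Any exception other than the anticipated ones must propagate to the caller rather than be swallowed.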
public void testDoAPMIndicesExistRethrowsUnexpectedExceptions() {
final RuntimeException exception = new RuntimeException();
final IndexNameExpressionResolver resolver = mock(IndexNameExpressionResolver.class);
when(resolver.concreteIndices(clusterState, IndicesOptions.lenientExpandOpen(), "apm-*")).thenThrow(exception);
final ClusterStatsCollector collector =
new ClusterStatsCollector(Settings.EMPTY, clusterService, licenseState, client, licenseService, resolver);
expectThrows(RuntimeException.class, () -> collector.doAPMIndicesExist(clusterState));
}
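// Full collection path: mocked clients feed a ClusterStatsCollector and the resulting document is asserted below.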
public void testDoCollect() throws Exception {
final Settings.Builder settings = Settings.builder();
final License.OperationMode mode =
@ -161,6 +199,11 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
final Client client = mock(Client.class);
when(client.admin()).thenReturn(adminClient);
final IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class);
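// Randomly report concrete apm-* indices so the document's apmIndicesExist flag can be asserted afterwards.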
final boolean apmIndicesExist = randomBoolean();
final Index[] indices = new Index[apmIndicesExist ? randomIntBetween(1, 5) : 0];
when(indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.lenientExpandOpen(), "apm-*")).thenReturn(indices);
final XPackUsageResponse xPackUsageResponse = new XPackUsageResponse(singletonList(new LogstashFeatureSet.Usage(true, true)));
@SuppressWarnings("unchecked")
@ -169,7 +212,9 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
when(xPackUsageFuture.actionGet()).thenReturn(xPackUsageResponse);
final ClusterStatsCollector collector =
new ClusterStatsCollector(settings.build(), clusterService, licenseState, client, licenseService);
new ClusterStatsCollector(settings.build(), clusterService, licenseState, client, licenseService,
indexNameExpressionResolver);
assertEquals(timeout, collector.getCollectionTimeout());
final long interval = randomNonNegativeLong();
@ -201,6 +246,7 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
assertThat(document.getClusterStats().getStatus(), equalTo(clusterStatus));
assertThat(document.getClusterStats().getIndicesStats().getIndexCount(), equalTo(nbIndices));
assertThat(document.getAPMIndicesExist(), is(apmIndicesExist));
assertThat(document.getUsages(), hasSize(1));
assertThat(document.getUsages().iterator().next().name(), equalTo(Logstash.NAME));

View File

@ -5,7 +5,6 @@
*/
package org.elasticsearch.xpack.monitoring.collector.cluster;
import java.util.Map;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
@ -54,6 +53,7 @@ import org.junit.Before;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
@ -77,6 +77,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
private ClusterState clusterState;
private License license;
private final boolean needToEnableTLS = randomBoolean();
private final boolean apmIndicesExist = randomBoolean();
@Override
@Before
@ -112,7 +113,8 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
protected ClusterStatsMonitoringDoc createMonitoringDoc(String cluster, long timestamp, long interval, MonitoringDoc.Node node,
MonitoredSystem system, String type, String id) {
return new ClusterStatsMonitoringDoc(cluster, timestamp, interval, node,
clusterName, version, clusterStatus, license, usages, clusterStats, clusterState,
clusterName, version, clusterStatus, license,
apmIndicesExist, usages, clusterStats, clusterState,
needToEnableTLS);
}
@ -126,6 +128,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
assertThat(document.getVersion(), equalTo(version));
assertThat(document.getStatus(), equalTo(clusterStatus));
assertThat(document.getLicense(), equalTo(license));
assertThat(document.getAPMIndicesExist(), is(apmIndicesExist));
assertThat(document.getUsages(), is(usages));
assertThat(document.getClusterStats(), is(clusterStats));
assertThat(document.getClusterState(), is(clusterState));
@ -134,21 +137,21 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
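// Null-argument guards: each required constructor parameter must be rejected when null.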
public void testConstructorClusterNameMustNotBeNull() {
expectThrows(NullPointerException.class,
() -> new ClusterStatsMonitoringDoc(cluster, timestamp, interval, node,
null, version, clusterStatus, license, usages, clusterStats, clusterState,
null, version, clusterStatus, license, apmIndicesExist, usages, clusterStats, clusterState,
needToEnableTLS));
}
public void testConstructorVersionMustNotBeNull() {
expectThrows(NullPointerException.class,
() -> new ClusterStatsMonitoringDoc(cluster, timestamp, interval, node,
clusterName, null, clusterStatus, license, usages, clusterStats, clusterState,
clusterName, null, clusterStatus, license, apmIndicesExist, usages, clusterStats, clusterState,
needToEnableTLS));
}
public void testConstructorClusterHealthStatusMustNotBeNull() {
expectThrows(NullPointerException.class,
() -> new ClusterStatsMonitoringDoc(cluster, timestamp, interval, node,
clusterName, version, null, license, usages, clusterStats, clusterState,
clusterName, version, null, license, apmIndicesExist, usages, clusterStats, clusterState,
needToEnableTLS));
}
@ -175,13 +178,13 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
}
final DiscoveryNodes nodes = builder.build();
String ephemeralIds = "";
StringBuilder ephemeralIds = new StringBuilder();
for (final DiscoveryNode node : nodes) {
ephemeralIds += node.getEphemeralId();
ephemeralIds.append(node.getEphemeralId());
}
assertThat(ClusterStatsMonitoringDoc.nodesHash(nodes), equalTo(ephemeralIds.hashCode()));
assertThat(ClusterStatsMonitoringDoc.nodesHash(nodes), equalTo(ephemeralIds.toString().hashCode()));
}
public void testHash() {
@ -250,7 +253,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
.maxNodes(2)
.build();
List<XPackFeatureSet.Usage> usages = singletonList(new LogstashFeatureSet.Usage(false, true));
final List<XPackFeatureSet.Usage> usages = singletonList(new LogstashFeatureSet.Usage(false, true));
final NodeInfo mockNodeInfo = mock(NodeInfo.class);
when(mockNodeInfo.getVersion()).thenReturn(Version.V_6_0_0_alpha2);
@ -342,6 +345,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
"_version",
ClusterHealthStatus.GREEN,
license,
apmIndicesExist,
usages,
clusterStats,
clusterState,
@ -542,6 +546,9 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<ClusterStatsMonitoringDoc> {
+ "}"
+ "},"
+ "\"stack_stats\":{"
+ "\"apm\":{"
+ "\"found\":" + apmIndicesExist
+ "},"
+ "\"xpack\":{"
+ "\"logstash\":{"
+ "\"available\":false,"

View File

@ -190,8 +190,10 @@ public class MonitoringIT extends ESRestTestCase {
*/
@SuppressWarnings("unchecked")
public void testMonitoringService() throws Exception {
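// Randomly index into an apm-* index (or a plain one) so both values of stack_stats.apm.found are exercised.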
final boolean createAPMIndex = randomBoolean();
final String indexName = createAPMIndex ? "apm-2017.11.06" : "books";
final HttpEntity document = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);
assertThat(client().performRequest("POST", "/books/book/0", singletonMap("refresh", "true"), document)
assertThat(client().performRequest("POST", "/" + indexName + "/doc/0", singletonMap("refresh", "true"), document)
.getStatusLine().getStatusCode(), equalTo(HttpStatus.SC_CREATED));
whenExportersAreReady(() -> {
@ -209,7 +211,7 @@ public class MonitoringIT extends ESRestTestCase {
final String type = (String) extractValue("_source.type", searchHit);
if (ClusterStatsMonitoringDoc.TYPE.equals(type)) {
assertClusterStatsMonitoringDoc(searchHit, collectionInterval);
assertClusterStatsMonitoringDoc(searchHit, collectionInterval, createAPMIndex);
} else if (IndexRecoveryMonitoringDoc.TYPE.equals(type)) {
assertIndexRecoveryMonitoringDoc(searchHit, collectionInterval);
} else if (IndicesStatsMonitoringDoc.TYPE.equals(type)) {
@ -294,7 +296,9 @@ public class MonitoringIT extends ESRestTestCase {
* Assert that a {@link ClusterStatsMonitoringDoc} contains the expected information
*/
@SuppressWarnings("unchecked")
private static void assertClusterStatsMonitoringDoc(final Map<String, Object> document, final TimeValue interval) throws Exception {
private static void assertClusterStatsMonitoringDoc(final Map<String, Object> document,
final TimeValue interval,
final boolean apmIndicesExist) throws Exception {
assertMonitoringDoc(document, MonitoredSystem.ES, ClusterStatsMonitoringDoc.TYPE, interval);
final Map<String, Object> source = (Map<String, Object>) document.get("_source");
@ -335,7 +339,13 @@ public class MonitoringIT extends ESRestTestCase {
final Map<String, Object> stackStats = (Map<String, Object>) source.get("stack_stats");
assertThat(stackStats, notNullValue());
assertThat(stackStats.size(), equalTo(1));
assertThat(stackStats.size(), equalTo(2));
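// The new "apm" section sits alongside "xpack" and carries a single "found" flag.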
final Map<String, Object> apm = (Map<String, Object>) stackStats.get("apm");
assertThat(apm, notNullValue());
assertThat(apm.size(), equalTo(1));
assertThat(apm.remove("found"), is(apmIndicesExist));
assertThat(apm.isEmpty(), is(true));
final Map<String, Object> xpackStats = (Map<String, Object>) stackStats.get("xpack");
assertThat(xpackStats, notNullValue());

View File

@ -275,7 +275,7 @@ public class PkiRealmTests extends ESTestCase {
List<Setting<?>> settingList = new ArrayList<>();
RealmSettings.addSettings(settingList, Collections.emptyList());
ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(settingList));
clusterSettings.validate(settings);
clusterSettings.validate(settings, false);
assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword });
}