Merge branch 'master' into eight_point_three
commit 5e87801a4b
@@ -82,7 +82,7 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client].
 * https://github.com/searchbox-io/Jest[Jest]:
   Java Rest client.
-* There is of course the http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current/index.html[native ES Java client]
+* There is of course the {client}/java-api/current/index.html[native ES Java client]

 [[community-javascript]]
 === JavaScript

@@ -1,6 +1,6 @@
 = Community Supported Clients

-:client: http://www.elasticsearch.org/guide/en/elasticsearch/client
+:client: http://www.elastic.co/guide/en/elasticsearch/client


 include::clients.asciidoc[]

@@ -1,6 +1,6 @@
 = Groovy API
-:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
-:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current
+:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current
+:java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current

 [preface]
 == Preface

@@ -1,6 +1,6 @@
 [[java-api]]
 = Java API
-:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
+:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current

 [preface]
 == Preface

@@ -1,138 +0,0 @@
-= elasticsearch-js
-
-== Overview
-
-Official low-level client for Elasticsearch. Its goal is to provide common
-ground for all Elasticsearch-related code in JavaScript; because of this it tries
-to be opinion-free and very extendable.
-
-The full documentation is available at http://elasticsearch.github.io/elasticsearch-js
-
-
-=== Getting the Node.js module
-
-To install the module into an existing Node.js project use npm:
-
-[source,sh]
-------------------------------------
-npm install elasticsearch
-------------------------------------
-
-=== Getting the browser client
-
-For a browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it, inside you'll find three files, pick the one that best matches your environment:
-
-* elasticsearch.jquery.js - for projects that already use jQuery
-* elasticsearch.angular.js - for Angular projects
-* elasticsearch.js - generic build for all other projects
-
-Each of the library specific builds tie into the AJAX and Promise creation facilities provided by their respective libraries. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate.
-
-=== Setting up the client
-
-Now you are ready to get busy! First thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs].
-
-[source,javascript]
-------------------------------------
-var elasticsearch = require('elasticsearch');
-
-// Connect to localhost:9200 and use the default settings
-var client = new elasticsearch.Client();
-
-// Connect the client to two nodes, requests will be
-// load-balanced between them using round-robin
-var client = elasticsearch.Client({
-  hosts: [
-    'elasticsearch1:9200',
-    'elasticsearch2:9200'
-  ]
-});
-
-// Connect to the this host's cluster, sniff
-// for the rest of the cluster right away, and
-// again every 5 minutes
-var client = elasticsearch.Client({
-  host: 'elasticsearch1:9200',
-  sniffOnStart: true,
-  sniffInterval: 300000
-});
-
-// Connect to this host using https, basic auth,
-// a path prefix, and static query string values
-var client = new elasticsearch.Client({
-  host: 'https://user:password@elasticsearch1/search?app=blog'
-});
-------------------------------------
-
-
-=== Setting up the client in the browser
-
-The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the Client constructor is different based on the build you are using. Below is an example of instantiating a client in each build.
-
-[source,javascript]
-------------------------------------
-// elasticsearch.js adds the elasticsearch namespace to the window
-var client = elasticsearch.Client({ ... });
-
-// elasticsearch.jquery.js adds the es namespace to the jQuery object
-var client = jQuery.es.Client({ ... });
-
-// elasticsearch.angular.js creates an elasticsearch
-// module, which provides an esFactory
-var app = angular.module('app', ['elasticsearch']);
-app.service('es', function (esFactory) {
-  return esFactory({ ... });
-});
-------------------------------------
-
-=== Using the client instance to make API calls.
-
-Once you create the client, making API calls is simple.
-
-[source,javascript]
-------------------------------------
-// get the current status of the entire cluster.
-// Note: params are always optional, you can just send a callback
-client.cluster.health(function (err, resp) {
-  if (err) {
-    console.error(err.message);
-  } else {
-    console.dir(resp);
-  }
-});
-
-// index a document
-client.index({
-  index: 'blog',
-  type: 'post',
-  id: 1,
-  body: {
-    title: 'JavaScript Everywhere!',
-    content: 'It all started when...',
-    date: '2013-12-17'
-  }
-}, function (err, resp) {
-  // ...
-});
-
-// search for documents (and also promises!!)
-client.search({
-  index: 'users',
-  size: 50,
-  body: {
-    query: {
-      match: {
-        profile: 'elasticsearch'
-      }
-    }
-  }
-}).then(function (resp) {
-  var hits = resp.body.hits;
-});
-------------------------------------
-
-== Copyright and License
-
-This software is Copyright (c) 2013-2015 by Elasticsearch BV.
-
-This is free software, licensed under The Apache License Version 2.0.

@@ -104,13 +104,13 @@ java -version
 echo $JAVA_HOME
 --------------------------------------------------

-Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elasticsearch.org/download[`www.elasticsearch.org/download`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file.
+Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file.

 Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package):

 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------------
-curl -L -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz
+curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz
 --------------------------------------------------

 Then extract it as follows (Windows users should unzip the zip package):

@@ -868,7 +868,7 @@ In the previous section, we skipped over a little detail called the document sco
 All queries in Elasticsearch trigger computation of the relevance scores. In cases where we do not need the relevance scores, Elasticsearch provides another query capability in the form of <<query-dsl-filters,filters>>. Filters are similar in concept to queries except that they are optimized for much faster execution speeds for two primary reasons:

 * Filters do not score so they are faster to execute than queries
-* Filters can be http://www.elasticsearch.org/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries
+* Filters can be http://www.elastic.co/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries

 To understand filters, let's first introduce the <<query-dsl-filtered-query,`filtered` query>>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with a filter. As an example, let's introduce the <<query-dsl-range-filter,`range` filter>>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering.

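The `filtered` query above can also be expressed through the Java API. A minimal sketch, assuming the 1.x `QueryBuilders`/`FilterBuilders` helpers (not part of this commit) and an illustrative `balance` field:

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;
import static org.elasticsearch.index.query.FilterBuilders.rangeFilter;
import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

// Combine a (here trivial) scored query with an unscored, cacheable range filter.
QueryBuilder query = filteredQuery(
        matchAllQuery(),
        rangeFilter("balance").gte(20000).lte(30000));
--------------------------------------------------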
@@ -362,7 +362,7 @@ in the query string.
 === Percolator

 The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator,
-but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator]
+but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator]
 blog post for the reasons why the percolator has been redesigned.

 Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries

@@ -26,7 +26,7 @@ plugin --install <org>/<user/component>/<version>
 -----------------------------------

 The plugins will be
-automatically downloaded in this case from `download.elasticsearch.org`,
+automatically downloaded in this case from `download.elastic.co`,
 and in case they don't exist there, from maven (central and sonatype).

 Note that when the plugin is located in maven central or sonatype

@@ -52,6 +52,8 @@ include::queries/range-query.asciidoc[]

 include::queries/regexp-query.asciidoc[]

+include::queries/span-containing-query.asciidoc[]
+
 include::queries/span-first-query.asciidoc[]

 include::queries/span-multi-term-query.asciidoc[]

@@ -64,6 +66,8 @@ include::queries/span-or-query.asciidoc[]

 include::queries/span-term-query.asciidoc[]

+include::queries/span-within-query.asciidoc[]
+
 include::queries/term-query.asciidoc[]

 include::queries/terms-query.asciidoc[]

@@ -0,0 +1,29 @@
+[[query-dsl-span-containing-query]]
+=== Span Containing Query
+
+Returns matches which enclose another span query. The span containing
+query maps to Lucene `SpanContainingQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+    "span_containing" : {
+        "little" : {
+            "span_term" : { "field1" : "foo" }
+        },
+        "big" : {
+            "span_near" : {
+                "clauses" : [
+                    { "span_term" : { "field1" : "bar" } },
+                    { "span_term" : { "field1" : "baz" } }
+                ],
+                "slop" : 5,
+                "in_order" : true
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+The `big` and `little` clauses can be any span type query. Matching
+spans from `big` that contain matches from `little` are returned.

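The same `span_containing` example can be built with the Java `QueryBuilders` factory methods added later in this commit; a minimal sketch, with the field name and terms as illustrative placeholders:

[source,java]
--------------------------------------------------
import static org.elasticsearch.index.query.QueryBuilders.*;

// span_containing: match spans of `big` that enclose a match of `little`.
SpanContainingQueryBuilder query = spanContainingQuery()
        .big(spanNearQuery()
                .clause(spanTermQuery("field1", "bar"))
                .clause(spanTermQuery("field1", "baz"))
                .slop(5)
                .inOrder(true))
        .little(spanTermQuery("field1", "foo"));
--------------------------------------------------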
@@ -0,0 +1,29 @@
+[[query-dsl-span-within-query]]
+=== Span Within Query
+
+Returns matches which are enclosed inside another span query. The span within
+query maps to Lucene `SpanWithinQuery`. Here is an example:
+
+[source,js]
+--------------------------------------------------
+{
+    "span_within" : {
+        "little" : {
+            "span_term" : { "field1" : "foo" }
+        },
+        "big" : {
+            "span_near" : {
+                "clauses" : [
+                    { "span_term" : { "field1" : "bar" } },
+                    { "span_term" : { "field1" : "baz" } }
+                ],
+                "slop" : 5,
+                "in_order" : true
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+The `big` and `little` clauses can be any span type query. Matching
+spans from `little` that are enclosed within `big` are returned.

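Likewise for `span_within`, using the builder this commit adds; again a sketch with placeholder field values:

[source,java]
--------------------------------------------------
import static org.elasticsearch.index.query.QueryBuilders.*;

// span_within: match spans of `little` that fall inside a match of `big`.
SpanWithinQueryBuilder query = spanWithinQuery()
        .big(spanNearQuery()
                .clause(spanTermQuery("field1", "bar"))
                .clause(spanTermQuery("field1", "baz"))
                .slop(5)
                .inOrder(true))
        .little(spanTermQuery("field1", "foo"));
--------------------------------------------------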
@@ -4,7 +4,7 @@
 [partintro]
 --
 This section includes information on how to setup *elasticsearch* and
-get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and
+get it running. If you haven't already, http://www.elastic.co/downloads[download] it, and
 then check the <<setup-installation,installation>> docs.

 NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`.

@@ -22,14 +22,14 @@ Download and install the Public Signing Key:

 [source,sh]
 --------------------------------------------------
-wget -qO - https://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -
+wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
 --------------------------------------------------

 Add the repository definition to your `/etc/apt/sources.list` file:

 ["source","sh",subs="attributes,callouts"]
 --------------------------------------------------
-echo "deb http://packages.elasticsearch.org/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list
+echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list
 --------------------------------------------------

 [WARNING]

@@ -65,7 +65,7 @@ Download and install the public signing key:

 [source,sh]
 --------------------------------------------------
-rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch
+rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
 --------------------------------------------------

 Add the following in your `/etc/yum.repos.d/` directory

@@ -75,9 +75,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo`
 --------------------------------------------------
 [elasticsearch-{branch}]
 name=Elasticsearch repository for {branch}.x packages
-baseurl=http://packages.elasticsearch.org/elasticsearch/{branch}/centos
+baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos
 gpgcheck=1
-gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
+gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
 enabled=1
 --------------------------------------------------

@@ -69,7 +69,7 @@ $ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
 [float]
 ==== 1.0 and later

-To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here].
+To back up a running 1.0 or later system, it is simplest to use the snapshot feature. See the complete instructions for <<modules-snapshots,backup and restore with snapshots>>.

 [float]
 [[rolling-upgrades]]

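For reference, the snapshot workflow the rewritten link points to can also be driven from the Java API. A minimal sketch, assuming a connected 1.x `Client` named `client`; the repository name, type, and location are illustrative placeholders:

[source,java]
--------------------------------------------------
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

// Register a shared-filesystem snapshot repository...
client.admin().cluster().preparePutRepository("my_backup")
        .setType("fs")
        .setSettings(settingsBuilder().put("location", "/mount/backups/my_backup"))
        .get();

// ...then snapshot all indices into it and wait for completion.
client.admin().cluster().prepareCreateSnapshot("my_backup", "snapshot_1")
        .setWaitForCompletion(true)
        .get();
--------------------------------------------------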
@@ -96,7 +96,7 @@ This syntax applies to Elasticsearch 1.0 and later:

 * Confirm that all shards are correctly reallocated to the remaining running nodes.

-* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org:
+* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elastic.co:
 ** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration.
 ** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsearch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved.
 ** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used.

@@ -22,10 +22,10 @@ improvements throughout this page to provide the full context.

 If you’re interested in more on how we approach ensuring resiliency in
 Elasticsearch, you may be interested in Igor Motov’s recent talk
-http://www.elasticsearch.org/videos/improving-elasticsearch-resiliency/[Improving Elasticsearch Resiliency].
+http://www.elastic.co/videos/improving-elasticsearch-resiliency[Improving Elasticsearch Resiliency].

 You may also be interested in our blog post
-http://www.elasticsearch.org/blog/resiliency-elasticsearch/[Resiliency in Elasticsearch],
+http://www.elastic.co/blog/resiliency-elasticsearch[Resiliency in Elasticsearch],
 which details our thought processes when addressing resiliency in both
 Elasticsearch and the work our developers do upstream in Apache Lucene.

@@ -416,7 +416,7 @@ The Snapshot/Restore API supports a number of different repository types for sto
 [float]
 === Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0)

-Currently, the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.
+Currently, the https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.

 [float]
 === Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0)

pom.xml
@@ -267,6 +267,12 @@
     <artifactId>jackson-dataformat-yaml</artifactId>
     <version>2.5.1</version>
     <scope>compile</scope>
+    <exclusions>
+        <exclusion>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+        </exclusion>
+    </exclusions>
 </dependency>

 <dependency>

@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.service;

-import com.google.common.collect.Iterables;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.*;
 import org.elasticsearch.cluster.ClusterState.Builder;

@@ -59,6 +58,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
  */
 public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {

+    public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold";
+    public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval";
+
     public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
     private final ThreadPool threadPool;

@@ -74,6 +76,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

     private final TimeValue reconnectInterval;

+    private TimeValue slowTaskLoggingThreshold;
+
     private volatile PrioritizedEsThreadPoolExecutor updateTasksExecutor;

     /**

@@ -115,8 +119,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         this.clusterState = ClusterState.builder(clusterName).build();

         this.nodeSettingsService.setClusterService(this);
+        this.nodeSettingsService.addListener(new ApplySettings());

-        this.reconnectInterval = this.settings.getAsTime("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10));
+        this.reconnectInterval = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL, TimeValue.timeValueSeconds(10));
+
+        this.slowTaskLoggingThreshold = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, TimeValue.timeValueSeconds(30));

         localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);

@@ -371,22 +378,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
             return;
         }
         ClusterState newClusterState;
+        long startTime = System.currentTimeMillis();
         try {
             newClusterState = updateTask.execute(previousClusterState);
         } catch (Throwable e) {
+            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
             if (logger.isTraceEnabled()) {
-                StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
+                StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
                 sb.append(previousClusterState.nodes().prettyPrint());
                 sb.append(previousClusterState.routingTable().prettyPrint());
                 sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
                 logger.trace(sb.toString(), e);
             }
+            warnAboutSlowTaskIfNeeded(executionTime, source);
             updateTask.onFailure(source, e);
             return;
         }

         if (previousClusterState == newClusterState) {
-            logger.debug("processing [{}]: no change in cluster_state", source);
             if (updateTask instanceof AckedClusterStateUpdateTask) {
                 //no need to wait for ack if nothing changed, the update can be counted as acknowledged
                 ((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);

@@ -394,6 +403,9 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
             if (updateTask instanceof ProcessedClusterStateUpdateTask) {
                 ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
             }
+            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+            logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
+            warnAboutSlowTaskIfNeeded(executionTime, source);
             return;
         }

@@ -511,9 +523,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                 ((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
             }

-            logger.debug("processing [{}]: done applying updated cluster_state (version: {}, uuid: {})", source, newClusterState.version(), newClusterState.uuid());
+            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+            logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.uuid());
+            warnAboutSlowTaskIfNeeded(executionTime, source);
         } catch (Throwable t) {
-            StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
+            TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, System.currentTimeMillis() - startTime));
+            StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
             sb.append(newClusterState.nodes().prettyPrint());
             sb.append(newClusterState.routingTable().prettyPrint());
             sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());

@@ -523,6 +538,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
         }
     }

+    private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
+        if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) {
+            logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold);
+        }
+    }
+
     class NotifyTimeout implements Runnable {
         final TimeoutClusterStateListener listener;
         final TimeValue timeout;

@@ -755,4 +776,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
             }
         }
     }
+
+    class ApplySettings implements NodeSettingsService.Listener {
+        @Override
+        public void onRefreshSettings(Settings settings) {
+            final TimeValue slowTaskLoggingThreshold = settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, InternalClusterService.this.slowTaskLoggingThreshold);
+            InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
+        }
+    }
+
 }

@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.*;
+import org.elasticsearch.cluster.service.InternalClusterService;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.zen.ZenDiscovery;

@@ -101,6 +102,7 @@ public class ClusterDynamicSettingsModule extends AbstractModule {
         clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
         clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
         clusterDynamicSettings.addDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
+        clusterDynamicSettings.addDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE);
     }

     public void addDynamicSettings(String... settings) {

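Because the slow-task threshold is registered as a dynamic setting above, it can be changed on a live cluster. A minimal sketch, mirroring the integration test later in this commit and assuming a connected `Client` named `client`:

[source,java]
--------------------------------------------------
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

// Raise the warn threshold at runtime; InternalClusterService.ApplySettings
// picks the new value up through NodeSettingsService.
client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(settingsBuilder()
                .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "30s"))
        .get();
--------------------------------------------------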
@@ -318,6 +318,16 @@ public abstract class QueryBuilders {
         return new SpanOrQueryBuilder();
     }

+    /** Creates a new {@code span_within} builder. */
+    public static SpanWithinQueryBuilder spanWithinQuery() {
+        return new SpanWithinQueryBuilder();
+    }
+
+    /** Creates a new {@code span_containing} builder. */
+    public static SpanContainingQueryBuilder spanContainingQuery() {
+        return new SpanContainingQueryBuilder();
+    }
+
     /**
      * Creates a {@link SpanQueryBuilder} which allows having a sub query
      * which implements {@link MultiTermQueryBuilder}. This is useful for

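A short usage sketch for the two factory methods above, assuming a connected 1.x `Client` named `client` and a placeholder index name:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import static org.elasticsearch.index.query.QueryBuilders.*;

// Execute a search with the new span_within builder; spanContainingQuery()
// is used the same way, with the roles of big/little reversed.
SearchResponse resp = client.prepareSearch("my_index")
        .setQuery(spanWithinQuery()
                .big(spanNearQuery()
                        .clause(spanTermQuery("field1", "bar"))
                        .clause(spanTermQuery("field1", "baz"))
                        .slop(5))
                .little(spanTermQuery("field1", "foo")))
        .get();
--------------------------------------------------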
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for {@link SpanContainingQuery}.
+ */
+public class SpanContainingQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanContainingQueryBuilder> {
+
+    private SpanQueryBuilder big;
+    private SpanQueryBuilder little;
+    private float boost = -1;
+    private String queryName;
+
+    /**
+     * Sets the little clause, it must be contained within {@code big} for a match.
+     */
+    public SpanContainingQueryBuilder little(SpanQueryBuilder clause) {
+        this.little = clause;
+        return this;
+    }
+
+    /**
+     * Sets the big clause, it must enclose {@code little} for a match.
+     */
+    public SpanContainingQueryBuilder big(SpanQueryBuilder clause) {
+        this.big = clause;
+        return this;
+    }
+
+    @Override
+    public SpanContainingQueryBuilder boost(float boost) {
+        this.boost = boost;
+        return this;
+    }
+
+    /**
+     * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+     */
+    public SpanContainingQueryBuilder queryName(String queryName) {
+        this.queryName = queryName;
+        return this;
+    }
+
+    @Override
+    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+        if (big == null) {
+            throw new IllegalArgumentException("Must specify big clause when building a span_containing query");
+        }
+        if (little == null) {
+            throw new IllegalArgumentException("Must specify little clause when building a span_containing query");
+        }
+        builder.startObject(SpanContainingQueryParser.NAME);
+
+        builder.field("big");
+        big.toXContent(builder, params);
+
+        builder.field("little");
+        little.toXContent(builder, params);
+
+        if (boost != -1) {
+            builder.field("boost", boost);
+        }
+
+        if (queryName != null) {
+            builder.field("_name", queryName);
+        }
+
+        builder.endObject();
+    }
+}

@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanContainingQuery;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for {@link SpanContainingQuery}
+ */
+public class SpanContainingQueryParser implements QueryParser {
+
+    public static final String NAME = "span_containing";
+
+    @Inject
+    public SpanContainingQueryParser() {
+    }
+
+    @Override
+    public String[] names() {
+        return new String[]{NAME, Strings.toCamelCase(NAME)};
+    }
+
+    @Override
+    public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+        XContentParser parser = parseContext.parser();
+
+        float boost = 1.0f;
+        String queryName = null;
+        SpanQuery big = null;
+        SpanQuery little = null;
+
+        String currentFieldName = null;
+        XContentParser.Token token;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.START_OBJECT) {
+                if ("big".equals(currentFieldName)) {
+                    Query query = parseContext.parseInnerQuery();
+                    if (!(query instanceof SpanQuery)) {
+                        throw new QueryParsingException(parseContext, "span_containing [big] must be of type span query");
+                    }
+                    big = (SpanQuery) query;
+                } else if ("little".equals(currentFieldName)) {
+                    Query query = parseContext.parseInnerQuery();
+                    if (!(query instanceof SpanQuery)) {
+                        throw new QueryParsingException(parseContext, "span_containing [little] must be of type span query");
+                    }
+                    little = (SpanQuery) query;
+                } else {
+                    throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]");
+                }
+            } else if ("boost".equals(currentFieldName)) {
+                boost = parser.floatValue();
+            } else if ("_name".equals(currentFieldName)) {
+                queryName = parser.text();
+            } else {
+                throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]");
+            }
+        }
+
+        if (big == null) {
+            throw new QueryParsingException(parseContext, "span_containing must include [big]");
+        }
+        if (little == null) {
+            throw new QueryParsingException(parseContext, "span_containing must include [little]");
+        }
+
+        Query query = new SpanContainingQuery(big, little);
+        query.setBoost(boost);
+        if (queryName != null) {
+            parseContext.addNamedQuery(queryName, query);
+        }
+        return query;
+    }
+}

@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * Builder for {@link SpanWithinQuery}.
+ */
+public class SpanWithinQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder<SpanWithinQueryBuilder> {
+
+    private SpanQueryBuilder big;
+    private SpanQueryBuilder little;
+    private float boost = -1;
+    private String queryName;
+
+    /**
+     * Sets the little clause, it must be contained within {@code big} for a match.
+     */
+    public SpanWithinQueryBuilder little(SpanQueryBuilder clause) {
+        this.little = clause;
+        return this;
+    }
+
+    /**
+     * Sets the big clause, it must enclose {@code little} for a match.
+     */
+    public SpanWithinQueryBuilder big(SpanQueryBuilder clause) {
+        this.big = clause;
+        return this;
+    }
+
+    @Override
+    public SpanWithinQueryBuilder boost(float boost) {
+        this.boost = boost;
+        return this;
+    }
+
+    /**
+     * Sets the query name for the filter that can be used when searching for matched_filters per hit.
+     */
+    public SpanWithinQueryBuilder queryName(String queryName) {
+        this.queryName = queryName;
+        return this;
+    }
+
+    @Override
+    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+        if (big == null) {
+            throw new IllegalArgumentException("Must specify big clause when building a span_within query");
+        }
+        if (little == null) {
+            throw new IllegalArgumentException("Must specify little clause when building a span_within query");
+        }
+        builder.startObject(SpanWithinQueryParser.NAME);
+
+        builder.field("big");
+        big.toXContent(builder, params);
+
+        builder.field("little");
+        little.toXContent(builder, params);
+
+        if (boost != -1) {
+            builder.field("boost", boost);
+        }
+
+        if (queryName != null) {
+            builder.field("_name", queryName);
+        }
+
+        builder.endObject();
+    }
+}

@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.spans.SpanQuery;
+import org.apache.lucene.search.spans.SpanWithinQuery;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for {@link SpanWithinQuery}
+ */
+public class SpanWithinQueryParser implements QueryParser {
+
+    public static final String NAME = "span_within";
+
+    @Inject
+    public SpanWithinQueryParser() {
+    }
+
+    @Override
+    public String[] names() {
+        return new String[]{NAME, Strings.toCamelCase(NAME)};
+    }
+
+    @Override
+    public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+        XContentParser parser = parseContext.parser();
+
+        float boost = 1.0f;
+        String queryName = null;
+        SpanQuery big = null;
+        SpanQuery little = null;
+
+        String currentFieldName = null;
+        XContentParser.Token token;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.START_OBJECT) {
+                if ("big".equals(currentFieldName)) {
+                    Query query = parseContext.parseInnerQuery();
+                    if (query instanceof SpanQuery == false) {
+                        throw new QueryParsingException(parseContext, "span_within [big] must be of type span query");
+                    }
+                    big = (SpanQuery) query;
+                } else if ("little".equals(currentFieldName)) {
+                    Query query = parseContext.parseInnerQuery();
+                    if (query instanceof SpanQuery == false) {
+                        throw new QueryParsingException(parseContext, "span_within [little] must be of type span query");
+                    }
+                    little = (SpanQuery) query;
+                } else {
+                    throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]");
+                }
+            } else if ("boost".equals(currentFieldName)) {
+                boost = parser.floatValue();
+            } else if ("_name".equals(currentFieldName)) {
+                queryName = parser.text();
+            } else {
+                throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]");
+            }
+        }
+
+        if (big == null) {
+            throw new QueryParsingException(parseContext, "span_within must include [big]");
+        }
+        if (little == null) {
+            throw new QueryParsingException(parseContext, "span_within must include [little]");
+        }
+
+        Query query = new SpanWithinQuery(big, little);
+        query.setBoost(boost);
+        if (queryName != null) {
+            parseContext.addNamedQuery(queryName, query);
+        }
+        return query;
+    }
+}

@@ -89,6 +89,8 @@ public class IndicesQueriesModule extends AbstractModule {
         qpBinders.addBinding().to(ConstantScoreQueryParser.class).asEagerSingleton();
         qpBinders.addBinding().to(SpanTermQueryParser.class).asEagerSingleton();
         qpBinders.addBinding().to(SpanNotQueryParser.class).asEagerSingleton();
+        qpBinders.addBinding().to(SpanWithinQueryParser.class).asEagerSingleton();
+        qpBinders.addBinding().to(SpanContainingQueryParser.class).asEagerSingleton();
         qpBinders.addBinding().to(FieldMaskingSpanQueryParser.class).asEagerSingleton();
         qpBinders.addBinding().to(SpanFirstQueryParser.class).asEagerSingleton();
         qpBinders.addBinding().to(SpanNearQueryParser.class).asEagerSingleton();

@@ -21,10 +21,12 @@ package org.elasticsearch.cluster;
 import com.google.common.base.Predicate;
 import com.google.common.util.concurrent.ListenableFuture;

-import org.elasticsearch.ElasticsearchException;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.InternalClusterService;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;

@@ -38,6 +40,8 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.plugins.AbstractPlugin;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.Test;

@@ -48,6 +52,7 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.*;

 /**

@@ -721,6 +726,215 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest {
         }
     }

+    @Test
+    @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
+    public void testClusterStateUpdateLogging() throws Exception {
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "local")
+                .build();
+        internalCluster().startNode(settings);
+        ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
+        MockLogAppender mockAppender = new MockLogAppender();
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)"));
+
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.addAppender(mockAppender);
+        try {
+            final CountDownLatch latch = new CountDownLatch(4);
+            clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    fail();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    latch.countDown();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return ClusterState.builder(currentState).incrementVersion().build();
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            // Additional update task to make sure all previous logging made it to the logger
+            // We don't check logging for this one since there is no guarantee that it will occur before our check
+            clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+        } finally {
+            rootLogger.removeAppender(mockAppender);
+        }
+        mockAppender.assertAllExpectationsMatched();
+    }
+
+    @Test
+    @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
+    public void testLongClusterStateUpdateLogging() throws Exception {
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "local")
+                .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s")
+                .build();
+        internalCluster().startNode(settings);
+        ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
+        MockLogAppender mockAppender = new MockLogAppender();
+        mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state update task [test3] took * above the warn threshold of 10ms"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms"));
+
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.addAppender(mockAppender);
+        try {
+            final CountDownLatch latch = new CountDownLatch(5);
+            final CountDownLatch processedFirstTask = new CountDownLatch(1);
+            clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                    processedFirstTask.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+
+            processedFirstTask.await(1, TimeUnit.SECONDS);
+            assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+                    .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms")));
+
+            clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    fail();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    latch.countDown();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return ClusterState.builder(currentState).incrementVersion().build();
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            // Additional update task to make sure all previous logging made it to the logger
+            // We don't check logging for this one since there is no guarantee that it will occur before our check
+            clusterService1.submitStateUpdateTask("test5", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
+        } finally {
+            rootLogger.removeAppender(mockAppender);
+        }
+        mockAppender.assertAllExpectationsMatched();
+    }
+
     private static class BlockingTask extends ClusterStateUpdateTask {
         private final CountDownLatch latch = new CountDownLatch(1);

@ -51,12 +51,14 @@ import org.apache.lucene.search.TermQuery;
|
|||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.search.WildcardQuery;
|
||||
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanContainingQuery;
|
||||
import org.apache.lucene.search.spans.SpanFirstQuery;
|
||||
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.search.spans.SpanWithinQuery;
|
||||
import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefBuilder;
|
||||
|
@@ -131,11 +133,13 @@ import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanContainingQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanFirstQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
@@ -1434,6 +1438,50 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest {
        assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
    }

    @Test
    public void testSpanWithinQueryBuilder() throws IOException {
        IndexQueryParserService queryParser = queryParser();
        Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))),
                new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0))));
        Query actualQuery = queryParser.parse(spanWithinQuery()
                .big(spanTermQuery("age", 34))
                .little(spanTermQuery("age", 35)))
                .query();
        assertEquals(expectedQuery, actualQuery);
    }

    @Test
    public void testSpanWithinQueryParser() throws IOException {
        IndexQueryParserService queryParser = queryParser();
        Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))),
                new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0))));
        String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanWithin.json");
        Query actualQuery = queryParser.parse(queryText).query();
        assertEquals(expectedQuery, actualQuery);
    }

    @Test
    public void testSpanContainingQueryBuilder() throws IOException {
        IndexQueryParserService queryParser = queryParser();
        Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))),
                new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0))));
        Query actualQuery = queryParser.parse(spanContainingQuery()
                .big(spanTermQuery("age", 34))
                .little(spanTermQuery("age", 35)))
                .query();
        assertEquals(expectedQuery, actualQuery);
    }

    @Test
    public void testSpanContainingQueryParser() throws IOException {
        IndexQueryParserService queryParser = queryParser();
        Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))),
                new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0))));
        String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanContaining.json");
        Query actualQuery = queryParser.parse(queryText).query();
        assertEquals(expectedQuery, actualQuery);
    }

    @Test
    public void testSpanFirstQueryBuilder() throws IOException {
        IndexQueryParserService queryParser = queryParser();
@@ -0,0 +1,14 @@
{
    span_containing:{
        big:{
            span_term:{
                age:34
            }
        },
        little:{
            span_term:{
                age:35
            }
        }
    }
}
@@ -0,0 +1,14 @@
{
    span_within:{
        big:{
            span_term:{
                age:34
            }
        },
        little:{
            span_term:{
                age:35
            }
        }
    }
}
@@ -0,0 +1,139 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.test;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.regex.Regex;

import java.util.List;

import static com.google.common.collect.Lists.newArrayList;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;

/**
 * Test appender that can be used to verify that certain events were logged correctly
 */
public class MockLogAppender extends AppenderSkeleton {

    private final static String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch.");

    private List<LoggingExpectation> expectations;

    public MockLogAppender() {
        expectations = newArrayList();
    }

    public void addExpectation(LoggingExpectation expectation) {
        expectations.add(expectation);
    }

    @Override
    protected void append(LoggingEvent loggingEvent) {
        for (LoggingExpectation expectation : expectations) {
            expectation.match(loggingEvent);
        }
    }

    @Override
    public void close() {
    }

    @Override
    public boolean requiresLayout() {
        return false;
    }

    public void assertAllExpectationsMatched() {
        for (LoggingExpectation expectation : expectations) {
            expectation.assertMatched();
        }
    }

    public interface LoggingExpectation {
        void match(LoggingEvent loggingEvent);

        void assertMatched();
    }

    public static abstract class AbstractEventExpectation implements LoggingExpectation {
        protected final String name;
        protected final String logger;
        protected final Level level;
        protected final String message;
        protected boolean saw;

        public AbstractEventExpectation(String name, String logger, Level level, String message) {
            this.name = name;
            this.logger = getLoggerName(logger);
            this.level = level;
            this.message = message;
            this.saw = false;
        }

        @Override
        public void match(LoggingEvent event) {
            if (event.getLevel() == level && event.getLoggerName().equals(logger)) {
                if (Regex.isSimpleMatchPattern(message)) {
                    if (Regex.simpleMatch(message, event.getMessage().toString())) {
                        saw = true;
                    }
                } else {
                    if (event.getMessage().toString().contains(message)) {
                        saw = true;
                    }
                }
            }
        }
    }

    public static class UnseenEventExpectation extends AbstractEventExpectation {

        public UnseenEventExpectation(String name, String logger, Level level, String message) {
            super(name, logger, level, message);
        }

        @Override
        public void assertMatched() {
            assertThat(name, saw, equalTo(false));
        }
    }

    public static class SeenEventExpectation extends AbstractEventExpectation {

        public SeenEventExpectation(String name, String logger, Level level, String message) {
            super(name, logger, level, message);
        }

        @Override
        public void assertMatched() {
            assertThat(name, saw, equalTo(true));
        }
    }

    private static String getLoggerName(String name) {
        if (name.startsWith("org.elasticsearch.")) {
            name = name.substring("org.elasticsearch.".length());
        }
        return COMMON_PREFIX + name;
    }
}
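Since `MockLogAppender` is an ordinary log4j appender, the intended flow (mirrored by the cluster service test above) is: register expectations, attach the appender to the root logger, exercise the code under test, detach in a `finally` block, then assert. A minimal sketch, assuming JUnit and log4j 1.x on the classpath; the test class, logger name, levels, and message patterns here are illustrative, not taken from the source:

[source,java]
------------------------------------
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.test.MockLogAppender;
import org.junit.Test;

// Hypothetical test class, for illustration only.
public class MockLogAppenderUsageSketch {

    @Test
    public void verifiesExpectedLogging() throws Exception {
        MockLogAppender mockAppender = new MockLogAppender();
        // A "seen" expectation must match at least one event; an "unseen" one must match none.
        // Messages containing '*' go through Regex.simpleMatch; plain strings use contains().
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
                "saw processing", "cluster.service", Level.DEBUG, "*processing*"));
        mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation(
                "no failures", "cluster.service", Level.WARN, "*failed*"));

        Logger rootLogger = Logger.getRootLogger();
        rootLogger.addAppender(mockAppender);
        try {
            // ... exercise the code expected to produce (or not produce) these events ...
        } finally {
            rootLogger.removeAppender(mockAppender); // always detach, even if the test body throws
        }
        mockAppender.assertAllExpectationsMatched();
    }
}
------------------------------------

Note that the logger name passed to an expectation is normalized by `getLoggerName`, so `"cluster.service"` matches events from `org.elasticsearch.cluster.service` (or whatever `es.logger.prefix` is set to).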