Merge branch 'master' into no_more_toe_stepping
This commit is contained in:
commit
a0c41218ff
|
@ -42,7 +42,7 @@ h3. Installation
|
||||||
|
|
||||||
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
|
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
|
||||||
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
|
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
|
||||||
* Run @curl -X GET http://127.0.0.1:9200/@.
|
* Run @curl -X GET http://localhost:9200/@.
|
||||||
* Start more servers ...
|
* Start more servers ...
|
||||||
|
|
||||||
h3. Indexing
|
h3. Indexing
|
||||||
|
@ -50,16 +50,16 @@ h3. Indexing
|
||||||
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
|
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
|
curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/1' -d '
|
curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T13:12:00",
|
"postDate": "2009-11-15T13:12:00",
|
||||||
"message": "Trying out Elasticsearch, so far so good?"
|
"message": "Trying out Elasticsearch, so far so good?"
|
||||||
}'
|
}'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/2' -d '
|
curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T14:12:12",
|
"postDate": "2009-11-15T14:12:12",
|
||||||
|
@ -70,9 +70,9 @@ curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/2' -d '
|
||||||
Now, let's see if the information was added by GETting it:
|
Now, let's see if the information was added by GETting it:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/user/kimchy?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/1?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/2?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
h3. Searching
|
h3. Searching
|
||||||
|
@ -81,13 +81,13 @@ Mmm search..., shouldn't it be elastic?
|
||||||
Let's find all the tweets that @kimchy@ posted:
|
Let's find all the tweets that @kimchy@ posted:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
We can also use the JSON query language Elasticsearch provides instead of a query string:
|
We can also use the JSON query language Elasticsearch provides instead of a query string:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"match" : { "user": "kimchy" }
|
"match" : { "user": "kimchy" }
|
||||||
|
@ -98,7 +98,7 @@ curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?pretty=true' -d '
|
||||||
Just for kicks, let's get all the documents stored (we should see the user as well):
|
Just for kicks, let's get all the documents stored (we should see the user as well):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
@ -109,7 +109,7 @@ curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
||||||
We can also do range search (the @postDate@ was automatically identified as date)
|
We can also do range search (the @postDate@ was automatically identified as date)
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"range" : {
|
"range" : {
|
||||||
|
@ -130,16 +130,16 @@ Elasticsearch supports multiple indices, as well as multiple types per index. In
|
||||||
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
|
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
|
curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/tweet/1' -d '
|
curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T13:12:00",
|
"postDate": "2009-11-15T13:12:00",
|
||||||
"message": "Trying out Elasticsearch, so far so good?"
|
"message": "Trying out Elasticsearch, so far so good?"
|
||||||
}'
|
}'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/tweet/2' -d '
|
curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T14:12:12",
|
"postDate": "2009-11-15T14:12:12",
|
||||||
|
@ -152,7 +152,7 @@ The above will index information into the @kimchy@ index, with two types, @info@
|
||||||
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
|
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT http://127.0.0.1:9200/another_user/ -d '
|
curl -XPUT http://localhost:9200/another_user/ -d '
|
||||||
{
|
{
|
||||||
"index" : {
|
"index" : {
|
||||||
"numberOfShards" : 1,
|
"numberOfShards" : 1,
|
||||||
|
@ -165,7 +165,7 @@ Search (and similar operations) are multi index aware. This means that we can ea
|
||||||
index (twitter user), for example:
|
index (twitter user), for example:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/kimchy,another_user/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
@ -176,7 +176,7 @@ curl -XGET 'http://127.0.0.1:9200/kimchy,another_user/_search?pretty=true' -d '
|
||||||
Or on all the indices:
|
Or on all the indices:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
|
|
@ -42,7 +42,7 @@ h3. Installation
|
||||||
|
|
||||||
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
|
* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
|
||||||
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
|
* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
|
||||||
* Run @curl -X GET http://127.0.0.1:9200/@.
|
* Run @curl -X GET http://localhost:9200/@.
|
||||||
* Start more servers ...
|
* Start more servers ...
|
||||||
|
|
||||||
h3. Indexing
|
h3. Indexing
|
||||||
|
@ -50,16 +50,16 @@ h3. Indexing
|
||||||
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
|
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
|
curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/1' -d '
|
curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T13:12:00",
|
"postDate": "2009-11-15T13:12:00",
|
||||||
"message": "Trying out Elasticsearch, so far so good?"
|
"message": "Trying out Elasticsearch, so far so good?"
|
||||||
}'
|
}'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/2' -d '
|
curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T14:12:12",
|
"postDate": "2009-11-15T14:12:12",
|
||||||
|
@ -70,9 +70,9 @@ curl -XPUT 'http://127.0.0.1:9200/twitter/tweet/2' -d '
|
||||||
Now, let's see if the information was added by GETting it:
|
Now, let's see if the information was added by GETting it:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/user/kimchy?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/1?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/2?pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
h3. Searching
|
h3. Searching
|
||||||
|
@ -81,13 +81,13 @@ Mmm search..., shouldn't it be elastic?
|
||||||
Let's find all the tweets that @kimchy@ posted:
|
Let's find all the tweets that @kimchy@ posted:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
|
curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
We can also use the JSON query language Elasticsearch provides instead of a query string:
|
We can also use the JSON query language Elasticsearch provides instead of a query string:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"match" : { "user": "kimchy" }
|
"match" : { "user": "kimchy" }
|
||||||
|
@ -98,7 +98,7 @@ curl -XGET 'http://127.0.0.1:9200/twitter/tweet/_search?pretty=true' -d '
|
||||||
Just for kicks, let's get all the documents stored (we should see the user as well):
|
Just for kicks, let's get all the documents stored (we should see the user as well):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
@ -109,7 +109,7 @@ curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
||||||
We can also do range search (the @postDate@ was automatically identified as date)
|
We can also do range search (the @postDate@ was automatically identified as date)
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/twitter/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"range" : {
|
"range" : {
|
||||||
|
@ -130,16 +130,16 @@ Elasticsearch supports multiple indices, as well as multiple types per index. In
|
||||||
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
|
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
|
curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/tweet/1' -d '
|
curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T13:12:00",
|
"postDate": "2009-11-15T13:12:00",
|
||||||
"message": "Trying out Elasticsearch, so far so good?"
|
"message": "Trying out Elasticsearch, so far so good?"
|
||||||
}'
|
}'
|
||||||
|
|
||||||
curl -XPUT 'http://127.0.0.1:9200/kimchy/tweet/2' -d '
|
curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
|
||||||
{
|
{
|
||||||
"user": "kimchy",
|
"user": "kimchy",
|
||||||
"postDate": "2009-11-15T14:12:12",
|
"postDate": "2009-11-15T14:12:12",
|
||||||
|
@ -152,7 +152,7 @@ The above will index information into the @kimchy@ index, with two types, @info@
|
||||||
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
|
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XPUT http://127.0.0.1:9200/another_user/ -d '
|
curl -XPUT http://localhost:9200/another_user/ -d '
|
||||||
{
|
{
|
||||||
"index" : {
|
"index" : {
|
||||||
"numberOfShards" : 1,
|
"numberOfShards" : 1,
|
||||||
|
@ -165,7 +165,7 @@ Search (and similar operations) are multi index aware. This means that we can ea
|
||||||
index (twitter user), for example:
|
index (twitter user), for example:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/kimchy,another_user/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
@ -176,7 +176,7 @@ curl -XGET 'http://127.0.0.1:9200/kimchy,another_user/_search?pretty=true' -d '
|
||||||
Or on all the indices:
|
Or on all the indices:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
curl -XGET 'http://127.0.0.1:9200/_search?pretty=true' -d '
|
curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
|
||||||
{
|
{
|
||||||
"query" : {
|
"query" : {
|
||||||
"matchAll" : {}
|
"matchAll" : {}
|
||||||
|
|
|
@ -5,15 +5,14 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch</groupId>
|
<groupId>org.elasticsearch</groupId>
|
||||||
<artifactId>elasticsearch-parent</artifactId>
|
<artifactId>parent</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch</groupId>
|
<groupId>org.elasticsearch</groupId>
|
||||||
<artifactId>elasticsearch</artifactId>
|
<artifactId>elasticsearch</artifactId>
|
||||||
|
|
||||||
<packaging>jar</packaging>
|
<name>Elasticsearch: Core</name>
|
||||||
<name>Elasticsearch Core</name>
|
|
||||||
<description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description>
|
<description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.node;
|
||||||
|
|
||||||
import com.google.common.collect.ImmutableList;
|
import com.google.common.collect.ImmutableList;
|
||||||
import com.google.common.collect.ImmutableMap;
|
import com.google.common.collect.ImmutableMap;
|
||||||
|
|
||||||
import org.elasticsearch.Version;
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.common.Booleans;
|
import org.elasticsearch.common.Booleans;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
|
@ -33,6 +34,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.net.InetAddress;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
|
import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
|
||||||
|
@ -136,7 +138,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
|
||||||
* @param version the version of the node.
|
* @param version the version of the node.
|
||||||
*/
|
*/
|
||||||
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map<String, String> attributes, Version version) {
|
public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map<String, String> attributes, Version version) {
|
||||||
this(nodeName, nodeId, NetworkUtils.getLocalHostName(""), NetworkUtils.getLocalHostAddress(""), address, attributes, version);
|
this(nodeName, nodeId, address.getHost(), address.getAddress(), address, attributes, version);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -40,6 +40,8 @@ import org.elasticsearch.common.logging.ESLogger;
|
||||||
import org.elasticsearch.common.logging.Loggers;
|
import org.elasticsearch.common.logging.Loggers;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.text.StringText;
|
import org.elasticsearch.common.text.StringText;
|
||||||
|
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||||
|
import org.elasticsearch.common.transport.TransportAddress;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.common.util.concurrent.*;
|
import org.elasticsearch.common.util.concurrent.*;
|
||||||
import org.elasticsearch.discovery.Discovery;
|
import org.elasticsearch.discovery.Discovery;
|
||||||
|
@ -159,7 +161,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||||
Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
|
Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
|
||||||
// note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
|
// note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
|
||||||
final String nodeId = DiscoveryService.generateNodeId(settings);
|
final String nodeId = DiscoveryService.generateNodeId(settings);
|
||||||
DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, transportService.boundAddress().publishAddress(), nodeAttributes, version);
|
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
|
||||||
|
DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, publishAddress, nodeAttributes, version);
|
||||||
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
|
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
|
||||||
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
|
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
|
||||||
this.transportService.setLocalNode(localNode);
|
this.transportService.setLocalNode(localNode);
|
||||||
|
|
|
@ -20,6 +20,7 @@
|
||||||
package org.elasticsearch.common.logging;
|
package org.elasticsearch.common.logging;
|
||||||
|
|
||||||
import com.google.common.collect.Lists;
|
import com.google.common.collect.Lists;
|
||||||
|
import org.apache.lucene.util.SuppressForbidden;
|
||||||
import org.elasticsearch.common.Classes;
|
import org.elasticsearch.common.Classes;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.index.Index;
|
import org.elasticsearch.index.Index;
|
||||||
|
@ -74,20 +75,27 @@ public class Loggers {
|
||||||
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
|
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@SuppressForbidden(reason = "using localhost for logging on which host it is is fine")
|
||||||
|
private static InetAddress getHostAddress() {
|
||||||
|
try {
|
||||||
|
return InetAddress.getLocalHost();
|
||||||
|
} catch (UnknownHostException e) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
|
public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
|
||||||
List<String> prefixesList = newArrayList();
|
List<String> prefixesList = newArrayList();
|
||||||
if (settings.getAsBoolean("logger.logHostAddress", false)) {
|
if (settings.getAsBoolean("logger.logHostAddress", false)) {
|
||||||
try {
|
final InetAddress addr = getHostAddress();
|
||||||
prefixesList.add(InetAddress.getLocalHost().getHostAddress());
|
if (addr != null) {
|
||||||
} catch (UnknownHostException e) {
|
prefixesList.add(addr.getHostAddress());
|
||||||
// ignore
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (settings.getAsBoolean("logger.logHostName", false)) {
|
if (settings.getAsBoolean("logger.logHostName", false)) {
|
||||||
try {
|
final InetAddress addr = getHostAddress();
|
||||||
prefixesList.add(InetAddress.getLocalHost().getHostName());
|
if (addr != null) {
|
||||||
} catch (UnknownHostException e) {
|
prefixesList.add(addr.getHostName());
|
||||||
// ignore
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
String name = settings.get("name");
|
String name = settings.get("name");
|
||||||
|
|
|
@ -28,11 +28,8 @@ import org.elasticsearch.common.unit.TimeValue;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.InetAddress;
|
import java.net.InetAddress;
|
||||||
import java.net.NetworkInterface;
|
|
||||||
import java.net.UnknownHostException;
|
import java.net.UnknownHostException;
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Locale;
|
|
||||||
import java.util.concurrent.CopyOnWriteArrayList;
|
import java.util.concurrent.CopyOnWriteArrayList;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
@ -41,7 +38,8 @@ import java.util.concurrent.TimeUnit;
|
||||||
*/
|
*/
|
||||||
public class NetworkService extends AbstractComponent {
|
public class NetworkService extends AbstractComponent {
|
||||||
|
|
||||||
public static final String LOCAL = "#local#";
|
/** By default, we bind to loopback interfaces */
|
||||||
|
public static final String DEFAULT_NETWORK_HOST = "_local_";
|
||||||
|
|
||||||
private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
|
private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
|
||||||
private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
|
private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
|
||||||
|
@ -71,12 +69,12 @@ public class NetworkService extends AbstractComponent {
|
||||||
/**
|
/**
|
||||||
* Resolves the default value if possible. If not, return <tt>null</tt>.
|
* Resolves the default value if possible. If not, return <tt>null</tt>.
|
||||||
*/
|
*/
|
||||||
InetAddress resolveDefault();
|
InetAddress[] resolveDefault();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Resolves a custom value handling, return <tt>null</tt> if can't handle it.
|
* Resolves a custom value handling, return <tt>null</tt> if can't handle it.
|
||||||
*/
|
*/
|
||||||
InetAddress resolveIfPossible(String value);
|
InetAddress[] resolveIfPossible(String value);
|
||||||
}
|
}
|
||||||
|
|
||||||
private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<>();
|
private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<>();
|
||||||
|
@ -94,100 +92,86 @@ public class NetworkService extends AbstractComponent {
|
||||||
customNameResolvers.add(customNameResolver);
|
customNameResolvers.add(customNameResolver);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public InetAddress[] resolveBindHostAddress(String bindHost) throws IOException {
|
||||||
public InetAddress resolveBindHostAddress(String bindHost) throws IOException {
|
// first check settings
|
||||||
return resolveBindHostAddress(bindHost, InetAddress.getLoopbackAddress().getHostAddress());
|
if (bindHost == null) {
|
||||||
}
|
bindHost = settings.get(GLOBAL_NETWORK_BINDHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING));
|
||||||
|
|
||||||
public InetAddress resolveBindHostAddress(String bindHost, String defaultValue2) throws IOException {
|
|
||||||
return resolveInetAddress(bindHost, settings.get(GLOBAL_NETWORK_BINDHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING)), defaultValue2);
|
|
||||||
}
|
|
||||||
|
|
||||||
public InetAddress resolvePublishHostAddress(String publishHost) throws IOException {
|
|
||||||
InetAddress address = resolvePublishHostAddress(publishHost,
|
|
||||||
InetAddress.getLoopbackAddress().getHostAddress());
|
|
||||||
// verify that its not a local address
|
|
||||||
if (address == null || address.isAnyLocalAddress()) {
|
|
||||||
address = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
|
|
||||||
if (address == null) {
|
|
||||||
address = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.getIpStackType());
|
|
||||||
if (address == null) {
|
|
||||||
address = NetworkUtils.getLocalAddress();
|
|
||||||
if (address == null) {
|
|
||||||
return NetworkUtils.getLocalhost(NetworkUtils.StackType.IPv4);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return address;
|
// next check any registered custom resolvers
|
||||||
}
|
if (bindHost == null) {
|
||||||
|
|
||||||
public InetAddress resolvePublishHostAddress(String publishHost, String defaultValue2) throws IOException {
|
|
||||||
return resolveInetAddress(publishHost, settings.get(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING)), defaultValue2);
|
|
||||||
}
|
|
||||||
|
|
||||||
public InetAddress resolveInetAddress(String host, String defaultValue1, String defaultValue2) throws UnknownHostException, IOException {
|
|
||||||
if (host == null) {
|
|
||||||
host = defaultValue1;
|
|
||||||
}
|
|
||||||
if (host == null) {
|
|
||||||
host = defaultValue2;
|
|
||||||
}
|
|
||||||
if (host == null) {
|
|
||||||
for (CustomNameResolver customNameResolver : customNameResolvers) {
|
for (CustomNameResolver customNameResolver : customNameResolvers) {
|
||||||
InetAddress inetAddress = customNameResolver.resolveDefault();
|
InetAddress addresses[] = customNameResolver.resolveDefault();
|
||||||
if (inetAddress != null) {
|
if (addresses != null) {
|
||||||
return inetAddress;
|
return addresses;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return null;
|
|
||||||
}
|
}
|
||||||
String origHost = host;
|
// finally, fill with our default
|
||||||
|
if (bindHost == null) {
|
||||||
|
bindHost = DEFAULT_NETWORK_HOST;
|
||||||
|
}
|
||||||
|
return resolveInetAddress(bindHost);
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: needs to be InetAddress[]
|
||||||
|
public InetAddress resolvePublishHostAddress(String publishHost) throws IOException {
|
||||||
|
// first check settings
|
||||||
|
if (publishHost == null) {
|
||||||
|
publishHost = settings.get(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.get(GLOBAL_NETWORK_HOST_SETTING));
|
||||||
|
}
|
||||||
|
// next check any registered custom resolvers
|
||||||
|
if (publishHost == null) {
|
||||||
|
for (CustomNameResolver customNameResolver : customNameResolvers) {
|
||||||
|
InetAddress addresses[] = customNameResolver.resolveDefault();
|
||||||
|
if (addresses != null) {
|
||||||
|
return addresses[0];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// finally, fill with our default
|
||||||
|
if (publishHost == null) {
|
||||||
|
publishHost = DEFAULT_NETWORK_HOST;
|
||||||
|
}
|
||||||
|
// TODO: allow publishing multiple addresses
|
||||||
|
return resolveInetAddress(publishHost)[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
private InetAddress[] resolveInetAddress(String host) throws UnknownHostException, IOException {
|
||||||
if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
|
if ((host.startsWith("#") && host.endsWith("#")) || (host.startsWith("_") && host.endsWith("_"))) {
|
||||||
host = host.substring(1, host.length() - 1);
|
host = host.substring(1, host.length() - 1);
|
||||||
|
// allow custom resolvers to have special names
|
||||||
for (CustomNameResolver customNameResolver : customNameResolvers) {
|
for (CustomNameResolver customNameResolver : customNameResolvers) {
|
||||||
InetAddress inetAddress = customNameResolver.resolveIfPossible(host);
|
InetAddress addresses[] = customNameResolver.resolveIfPossible(host);
|
||||||
if (inetAddress != null) {
|
if (addresses != null) {
|
||||||
return inetAddress;
|
return addresses;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
switch (host) {
|
||||||
if (host.equals("local")) {
|
case "local":
|
||||||
return NetworkUtils.getLocalAddress();
|
return NetworkUtils.getLoopbackAddresses();
|
||||||
} else if (host.startsWith("non_loopback")) {
|
case "local:ipv4":
|
||||||
if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
|
return NetworkUtils.filterIPV4(NetworkUtils.getLoopbackAddresses());
|
||||||
return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
|
case "local:ipv6":
|
||||||
} else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
|
return NetworkUtils.filterIPV6(NetworkUtils.getLoopbackAddresses());
|
||||||
return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv6);
|
case "non_loopback":
|
||||||
} else {
|
return NetworkUtils.getFirstNonLoopbackAddresses();
|
||||||
return NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.getIpStackType());
|
case "non_loopback:ipv4":
|
||||||
}
|
return NetworkUtils.filterIPV4(NetworkUtils.getFirstNonLoopbackAddresses());
|
||||||
} else {
|
case "non_loopback:ipv6":
|
||||||
NetworkUtils.StackType stackType = NetworkUtils.getIpStackType();
|
return NetworkUtils.filterIPV6(NetworkUtils.getFirstNonLoopbackAddresses());
|
||||||
if (host.toLowerCase(Locale.ROOT).endsWith(":ipv4")) {
|
default:
|
||||||
stackType = NetworkUtils.StackType.IPv4;
|
/* an interface specification */
|
||||||
host = host.substring(0, host.length() - 5);
|
if (host.endsWith(":ipv4")) {
|
||||||
} else if (host.toLowerCase(Locale.ROOT).endsWith(":ipv6")) {
|
host = host.substring(0, host.length() - 5);
|
||||||
stackType = NetworkUtils.StackType.IPv6;
|
return NetworkUtils.filterIPV4(NetworkUtils.getAddressesForInterface(host));
|
||||||
host = host.substring(0, host.length() - 5);
|
} else if (host.endsWith(":ipv6")) {
|
||||||
}
|
host = host.substring(0, host.length() - 5);
|
||||||
Collection<NetworkInterface> allInterfs = NetworkUtils.getAllAvailableInterfaces();
|
return NetworkUtils.filterIPV6(NetworkUtils.getAddressesForInterface(host));
|
||||||
for (NetworkInterface ni : allInterfs) {
|
} else {
|
||||||
if (!ni.isUp()) {
|
return NetworkUtils.getAddressesForInterface(host);
|
||||||
continue;
|
|
||||||
}
|
}
|
||||||
if (host.equals(ni.getName()) || host.equals(ni.getDisplayName())) {
|
|
||||||
if (ni.isLoopback()) {
|
|
||||||
return NetworkUtils.getFirstAddress(ni, stackType);
|
|
||||||
} else {
|
|
||||||
return NetworkUtils.getFirstNonLoopbackAddress(ni, stackType);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
throw new IOException("Failed to find network interface for [" + origHost + "]");
|
|
||||||
}
|
}
|
||||||
return InetAddress.getByName(host);
|
return NetworkUtils.getAllByName(host);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,303 +19,194 @@
|
||||||
|
|
||||||
package org.elasticsearch.common.network;
|
package org.elasticsearch.common.network;
|
||||||
|
|
||||||
import com.google.common.collect.Lists;
|
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.apache.lucene.util.CollectionUtil;
|
|
||||||
import org.apache.lucene.util.Constants;
|
import org.apache.lucene.util.Constants;
|
||||||
import org.elasticsearch.common.logging.ESLogger;
|
import org.elasticsearch.common.logging.ESLogger;
|
||||||
import org.elasticsearch.common.logging.Loggers;
|
import org.elasticsearch.common.logging.Loggers;
|
||||||
|
|
||||||
import java.net.*;
|
import java.net.Inet4Address;
|
||||||
import java.util.*;
|
import java.net.Inet6Address;
|
||||||
|
import java.net.InetAddress;
|
||||||
|
import java.net.NetworkInterface;
|
||||||
|
import java.net.SocketException;
|
||||||
|
import java.net.UnknownHostException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.Comparator;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
* Utilities for network interfaces / addresses
|
||||||
*/
|
*/
|
||||||
public abstract class NetworkUtils {
|
public abstract class NetworkUtils {
|
||||||
|
|
||||||
|
/** no instantation */
|
||||||
|
private NetworkUtils() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* By default we bind to any addresses on an interface/name, unless restricted by :ipv4 etc.
|
||||||
|
* This property is unrelated to that, this is about what we *publish*. Today the code pretty much
|
||||||
|
* expects one address so this is used for the sort order.
|
||||||
|
* @deprecated transition mechanism only
|
||||||
|
*/
|
||||||
|
@Deprecated
|
||||||
|
static final boolean PREFER_V6 = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses", "false"));
|
||||||
|
|
||||||
|
/** Sorts an address by preference. This way code like publishing can just pick the first one */
|
||||||
|
static int sortKey(InetAddress address, boolean prefer_v6) {
|
||||||
|
int key = address.getAddress().length;
|
||||||
|
if (prefer_v6) {
|
||||||
|
key = -key;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (address.isAnyLocalAddress()) {
|
||||||
|
key += 5;
|
||||||
|
}
|
||||||
|
if (address.isMulticastAddress()) {
|
||||||
|
key += 4;
|
||||||
|
}
|
||||||
|
if (address.isLoopbackAddress()) {
|
||||||
|
key += 3;
|
||||||
|
}
|
||||||
|
if (address.isLinkLocalAddress()) {
|
||||||
|
key += 2;
|
||||||
|
}
|
||||||
|
if (address.isSiteLocalAddress()) {
|
||||||
|
key += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return key;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Sorts addresses by order of preference. This is used to pick the first one for publishing
|
||||||
|
* @deprecated remove this when multihoming is really correct
|
||||||
|
*/
|
||||||
|
@Deprecated
|
||||||
|
private static void sortAddresses(List<InetAddress> list) {
|
||||||
|
Collections.sort(list, new Comparator<InetAddress>() {
|
||||||
|
@Override
|
||||||
|
public int compare(InetAddress left, InetAddress right) {
|
||||||
|
int cmp = Integer.compare(sortKey(left, PREFER_V6), sortKey(right, PREFER_V6));
|
||||||
|
if (cmp == 0) {
|
||||||
|
cmp = new BytesRef(left.getAddress()).compareTo(new BytesRef(right.getAddress()));
|
||||||
|
}
|
||||||
|
return cmp;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
private final static ESLogger logger = Loggers.getLogger(NetworkUtils.class);
|
private final static ESLogger logger = Loggers.getLogger(NetworkUtils.class);
|
||||||
|
|
||||||
public static enum StackType {
|
/** Return all interfaces (and subinterfaces) on the system */
|
||||||
IPv4, IPv6, Unknown
|
static List<NetworkInterface> getInterfaces() throws SocketException {
|
||||||
|
List<NetworkInterface> all = new ArrayList<>();
|
||||||
|
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
|
||||||
|
Collections.sort(all, new Comparator<NetworkInterface>() {
|
||||||
|
@Override
|
||||||
|
public int compare(NetworkInterface left, NetworkInterface right) {
|
||||||
|
return Integer.compare(left.getIndex(), right.getIndex());
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return all;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static final String IPv4_SETTING = "java.net.preferIPv4Stack";
|
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
|
||||||
public static final String IPv6_SETTING = "java.net.preferIPv6Addresses";
|
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
|
||||||
|
if (!level.isEmpty()) {
|
||||||
public static final String NON_LOOPBACK_ADDRESS = "non_loopback_address";
|
target.addAll(level);
|
||||||
|
for (NetworkInterface intf : level) {
|
||||||
private final static InetAddress localAddress;
|
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
|
||||||
|
}
|
||||||
static {
|
|
||||||
InetAddress localAddressX;
|
|
||||||
try {
|
|
||||||
localAddressX = InetAddress.getLocalHost();
|
|
||||||
} catch (Throwable e) {
|
|
||||||
logger.warn("failed to resolve local host, fallback to loopback", e);
|
|
||||||
localAddressX = InetAddress.getLoopbackAddress();
|
|
||||||
}
|
}
|
||||||
localAddress = localAddressX;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Returns system default for SO_REUSEADDR */
|
||||||
public static boolean defaultReuseAddress() {
|
public static boolean defaultReuseAddress() {
|
||||||
return Constants.WINDOWS ? false : true;
|
return Constants.WINDOWS ? false : true;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static boolean isIPv4() {
|
/** Returns addresses for all loopback interfaces that are up. */
|
||||||
return System.getProperty("java.net.preferIPv4Stack") != null && System.getProperty("java.net.preferIPv4Stack").equals("true");
|
public static InetAddress[] getLoopbackAddresses() throws SocketException {
|
||||||
}
|
List<InetAddress> list = new ArrayList<>();
|
||||||
|
|
||||||
public static InetAddress getIPv4Localhost() throws UnknownHostException {
|
|
||||||
return getLocalhost(StackType.IPv4);
|
|
||||||
}
|
|
||||||
|
|
||||||
public static InetAddress getIPv6Localhost() throws UnknownHostException {
|
|
||||||
return getLocalhost(StackType.IPv6);
|
|
||||||
}
|
|
||||||
|
|
||||||
public static InetAddress getLocalAddress() {
|
|
||||||
return localAddress;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static String getLocalHostName(String defaultHostName) {
|
|
||||||
if (localAddress == null) {
|
|
||||||
return defaultHostName;
|
|
||||||
}
|
|
||||||
String hostName = localAddress.getHostName();
|
|
||||||
if (hostName == null) {
|
|
||||||
return defaultHostName;
|
|
||||||
}
|
|
||||||
return hostName;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static String getLocalHostAddress(String defaultHostAddress) {
|
|
||||||
if (localAddress == null) {
|
|
||||||
return defaultHostAddress;
|
|
||||||
}
|
|
||||||
String hostAddress = localAddress.getHostAddress();
|
|
||||||
if (hostAddress == null) {
|
|
||||||
return defaultHostAddress;
|
|
||||||
}
|
|
||||||
return hostAddress;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static InetAddress getLocalhost(StackType ip_version) throws UnknownHostException {
|
|
||||||
if (ip_version == StackType.IPv4)
|
|
||||||
return InetAddress.getByName("127.0.0.1");
|
|
||||||
else
|
|
||||||
return InetAddress.getByName("::1");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the first non-loopback address on any interface on the current host.
|
|
||||||
*
|
|
||||||
* @param ip_version Constraint on IP version of address to be returned, 4 or 6
|
|
||||||
*/
|
|
||||||
public static InetAddress getFirstNonLoopbackAddress(StackType ip_version) throws SocketException {
|
|
||||||
InetAddress address;
|
|
||||||
for (NetworkInterface intf : getInterfaces()) {
|
for (NetworkInterface intf : getInterfaces()) {
|
||||||
try {
|
if (intf.isLoopback() && intf.isUp()) {
|
||||||
if (!intf.isUp() || intf.isLoopback())
|
list.addAll(Collections.list(intf.getInetAddresses()));
|
||||||
continue;
|
|
||||||
} catch (Exception e) {
|
|
||||||
// might happen when calling on a network interface that does not exists
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
address = getFirstNonLoopbackAddress(intf, ip_version);
|
|
||||||
if (address != null) {
|
|
||||||
return address;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (list.isEmpty()) {
|
||||||
return null;
|
throw new IllegalArgumentException("No up-and-running loopback interfaces found, got " + getInterfaces());
|
||||||
}
|
|
||||||
|
|
||||||
private static List<NetworkInterface> getInterfaces() throws SocketException {
|
|
||||||
Enumeration intfs = NetworkInterface.getNetworkInterfaces();
|
|
||||||
|
|
||||||
List<NetworkInterface> intfsList = Lists.newArrayList();
|
|
||||||
while (intfs.hasMoreElements()) {
|
|
||||||
intfsList.add((NetworkInterface) intfs.nextElement());
|
|
||||||
}
|
}
|
||||||
|
sortAddresses(list);
|
||||||
sortInterfaces(intfsList);
|
return list.toArray(new InetAddress[list.size()]);
|
||||||
return intfsList;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void sortInterfaces(List<NetworkInterface> intfsList) {
|
/** Returns addresses for the first non-loopback interface that is up. */
|
||||||
// order by index, assuming first ones are more interesting
|
public static InetAddress[] getFirstNonLoopbackAddresses() throws SocketException {
|
||||||
CollectionUtil.timSort(intfsList, new Comparator<NetworkInterface>() {
|
List<InetAddress> list = new ArrayList<>();
|
||||||
@Override
|
for (NetworkInterface intf : getInterfaces()) {
|
||||||
public int compare(NetworkInterface o1, NetworkInterface o2) {
|
if (intf.isLoopback() == false && intf.isUp()) {
|
||||||
return Integer.compare (o1.getIndex(), o2.getIndex());
|
list.addAll(Collections.list(intf.getInetAddresses()));
|
||||||
}
|
break;
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the first non-loopback address on the given interface on the current host.
|
|
||||||
*
|
|
||||||
* @param intf the interface to be checked
|
|
||||||
* @param ipVersion Constraint on IP version of address to be returned, 4 or 6
|
|
||||||
*/
|
|
||||||
public static InetAddress getFirstNonLoopbackAddress(NetworkInterface intf, StackType ipVersion) throws SocketException {
|
|
||||||
if (intf == null)
|
|
||||||
throw new IllegalArgumentException("Network interface pointer is null");
|
|
||||||
|
|
||||||
for (Enumeration addresses = intf.getInetAddresses(); addresses.hasMoreElements(); ) {
|
|
||||||
InetAddress address = (InetAddress) addresses.nextElement();
|
|
||||||
if (!address.isLoopbackAddress()) {
|
|
||||||
if ((address instanceof Inet4Address && ipVersion == StackType.IPv4) ||
|
|
||||||
(address instanceof Inet6Address && ipVersion == StackType.IPv6))
|
|
||||||
return address;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return null;
|
if (list.isEmpty()) {
|
||||||
}
|
throw new IllegalArgumentException("No up-and-running non-loopback interfaces found, got " + getInterfaces());
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the first address with the proper ipVersion on the given interface on the current host.
|
|
||||||
*
|
|
||||||
* @param intf the interface to be checked
|
|
||||||
* @param ipVersion Constraint on IP version of address to be returned, 4 or 6
|
|
||||||
*/
|
|
||||||
public static InetAddress getFirstAddress(NetworkInterface intf, StackType ipVersion) throws SocketException {
|
|
||||||
if (intf == null)
|
|
||||||
throw new IllegalArgumentException("Network interface pointer is null");
|
|
||||||
|
|
||||||
for (Enumeration addresses = intf.getInetAddresses(); addresses.hasMoreElements(); ) {
|
|
||||||
InetAddress address = (InetAddress) addresses.nextElement();
|
|
||||||
if ((address instanceof Inet4Address && ipVersion == StackType.IPv4) ||
|
|
||||||
(address instanceof Inet6Address && ipVersion == StackType.IPv6))
|
|
||||||
return address;
|
|
||||||
}
|
}
|
||||||
return null;
|
sortAddresses(list);
|
||||||
|
return list.toArray(new InetAddress[list.size()]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/** Returns addresses for the given interface (it must be marked up) */
|
||||||
* A function to check if an interface supports an IP version (i.e has addresses
|
public static InetAddress[] getAddressesForInterface(String name) throws SocketException {
|
||||||
* defined for that IP version).
|
NetworkInterface intf = NetworkInterface.getByName(name);
|
||||||
*
|
if (intf == null) {
|
||||||
* @param intf
|
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
|
||||||
* @return
|
|
||||||
*/
|
|
||||||
public static boolean interfaceHasIPAddresses(NetworkInterface intf, StackType ipVersion) throws SocketException, UnknownHostException {
|
|
||||||
boolean supportsVersion = false;
|
|
||||||
if (intf != null) {
|
|
||||||
// get all the InetAddresses defined on the interface
|
|
||||||
Enumeration addresses = intf.getInetAddresses();
|
|
||||||
while (addresses != null && addresses.hasMoreElements()) {
|
|
||||||
// get the next InetAddress for the current interface
|
|
||||||
InetAddress address = (InetAddress) addresses.nextElement();
|
|
||||||
|
|
||||||
// check if we find an address of correct version
|
|
||||||
if ((address instanceof Inet4Address && (ipVersion == StackType.IPv4)) ||
|
|
||||||
(address instanceof Inet6Address && (ipVersion == StackType.IPv6))) {
|
|
||||||
supportsVersion = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
throw new UnknownHostException("network interface not found");
|
|
||||||
}
|
}
|
||||||
return supportsVersion;
|
if (!intf.isUp()) {
|
||||||
|
throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
|
||||||
|
}
|
||||||
|
List<InetAddress> list = Collections.list(intf.getInetAddresses());
|
||||||
|
if (list.isEmpty()) {
|
||||||
|
throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses");
|
||||||
|
}
|
||||||
|
sortAddresses(list);
|
||||||
|
return list.toArray(new InetAddress[list.size()]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/** Returns addresses for the given host, sorted by order of preference */
|
||||||
* Tries to determine the type of IP stack from the available interfaces and their addresses and from the
|
public static InetAddress[] getAllByName(String host) throws UnknownHostException {
|
||||||
* system properties (java.net.preferIPv4Stack and java.net.preferIPv6Addresses)
|
InetAddress addresses[] = InetAddress.getAllByName(host);
|
||||||
*
|
sortAddresses(Arrays.asList(addresses));
|
||||||
* @return StackType.IPv4 for an IPv4 only stack, StackYTypeIPv6 for an IPv6 only stack, and StackType.Unknown
|
return addresses;
|
||||||
* if the type cannot be detected
|
|
||||||
*/
|
|
||||||
public static StackType getIpStackType() {
|
|
||||||
boolean isIPv4StackAvailable = isStackAvailable(true);
|
|
||||||
boolean isIPv6StackAvailable = isStackAvailable(false);
|
|
||||||
|
|
||||||
// if only IPv4 stack available
|
|
||||||
if (isIPv4StackAvailable && !isIPv6StackAvailable) {
|
|
||||||
return StackType.IPv4;
|
|
||||||
}
|
|
||||||
// if only IPv6 stack available
|
|
||||||
else if (isIPv6StackAvailable && !isIPv4StackAvailable) {
|
|
||||||
return StackType.IPv6;
|
|
||||||
}
|
|
||||||
// if dual stack
|
|
||||||
else if (isIPv4StackAvailable && isIPv6StackAvailable) {
|
|
||||||
// get the System property which records user preference for a stack on a dual stack machine
|
|
||||||
if (Boolean.getBoolean(IPv4_SETTING)) // has preference over java.net.preferIPv6Addresses
|
|
||||||
return StackType.IPv4;
|
|
||||||
if (Boolean.getBoolean(IPv6_SETTING))
|
|
||||||
return StackType.IPv6;
|
|
||||||
return StackType.IPv6;
|
|
||||||
}
|
|
||||||
return StackType.Unknown;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Returns only the IPV4 addresses in {@code addresses} */
|
||||||
public static boolean isStackAvailable(boolean ipv4) {
|
public static InetAddress[] filterIPV4(InetAddress addresses[]) {
|
||||||
Collection<InetAddress> allAddrs = getAllAvailableAddresses();
|
List<InetAddress> list = new ArrayList<>();
|
||||||
for (InetAddress addr : allAddrs)
|
for (InetAddress address : addresses) {
|
||||||
if (ipv4 && addr instanceof Inet4Address || (!ipv4 && addr instanceof Inet6Address))
|
if (address instanceof Inet4Address) {
|
||||||
return true;
|
list.add(address);
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns all the available interfaces, including first level sub interfaces.
|
|
||||||
*/
|
|
||||||
public static List<NetworkInterface> getAllAvailableInterfaces() throws SocketException {
|
|
||||||
List<NetworkInterface> allInterfaces = new ArrayList<>();
|
|
||||||
for (Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); interfaces.hasMoreElements(); ) {
|
|
||||||
NetworkInterface intf = interfaces.nextElement();
|
|
||||||
allInterfaces.add(intf);
|
|
||||||
|
|
||||||
Enumeration<NetworkInterface> subInterfaces = intf.getSubInterfaces();
|
|
||||||
if (subInterfaces != null && subInterfaces.hasMoreElements()) {
|
|
||||||
while (subInterfaces.hasMoreElements()) {
|
|
||||||
allInterfaces.add(subInterfaces.nextElement());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sortInterfaces(allInterfaces);
|
if (list.isEmpty()) {
|
||||||
return allInterfaces;
|
throw new IllegalArgumentException("No ipv4 addresses found in " + Arrays.toString(addresses));
|
||||||
}
|
|
||||||
|
|
||||||
public static Collection<InetAddress> getAllAvailableAddresses() {
|
|
||||||
// we want consistent order here.
|
|
||||||
final Set<InetAddress> retval = new TreeSet<>(new Comparator<InetAddress>() {
|
|
||||||
BytesRef left = new BytesRef();
|
|
||||||
BytesRef right = new BytesRef();
|
|
||||||
@Override
|
|
||||||
public int compare(InetAddress o1, InetAddress o2) {
|
|
||||||
return set(left, o1).compareTo(set(right, o1));
|
|
||||||
}
|
|
||||||
|
|
||||||
private BytesRef set(BytesRef ref, InetAddress addr) {
|
|
||||||
ref.bytes = addr.getAddress();
|
|
||||||
ref.offset = 0;
|
|
||||||
ref.length = ref.bytes.length;
|
|
||||||
return ref;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
try {
|
|
||||||
for (NetworkInterface intf : getInterfaces()) {
|
|
||||||
Enumeration<InetAddress> addrs = intf.getInetAddresses();
|
|
||||||
while (addrs.hasMoreElements())
|
|
||||||
retval.add(addrs.nextElement());
|
|
||||||
}
|
|
||||||
} catch (SocketException e) {
|
|
||||||
logger.warn("Failed to derive all available interfaces", e);
|
|
||||||
}
|
}
|
||||||
|
return list.toArray(new InetAddress[list.size()]);
|
||||||
return retval;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Returns only the IPV6 addresses in {@code addresses} */
|
||||||
private NetworkUtils() {
|
public static InetAddress[] filterIPV6(InetAddress addresses[]) {
|
||||||
|
List<InetAddress> list = new ArrayList<>();
|
||||||
|
for (InetAddress address : addresses) {
|
||||||
|
if (address instanceof Inet6Address) {
|
||||||
|
list.add(address);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (list.isEmpty()) {
|
||||||
|
throw new IllegalArgumentException("No ipv6 addresses found in " + Arrays.toString(addresses));
|
||||||
|
}
|
||||||
|
return list.toArray(new InetAddress[list.size()]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,6 +44,21 @@ public class DummyTransportAddress implements TransportAddress {
|
||||||
return other == INSTANCE;
|
return other == INSTANCE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getHost() {
|
||||||
|
return "dummy";
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getAddress() {
|
||||||
|
return "0.0.0.0"; // see https://en.wikipedia.org/wiki/0.0.0.0
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getPort() {
|
||||||
|
return 42;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DummyTransportAddress readFrom(StreamInput in) throws IOException {
|
public DummyTransportAddress readFrom(StreamInput in) throws IOException {
|
||||||
return INSTANCE;
|
return INSTANCE;
|
||||||
|
|
|
@ -30,7 +30,7 @@ import java.net.InetSocketAddress;
|
||||||
/**
|
/**
|
||||||
* A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}).
|
* A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}).
|
||||||
*/
|
*/
|
||||||
public class InetSocketTransportAddress implements TransportAddress {
|
public final class InetSocketTransportAddress implements TransportAddress {
|
||||||
|
|
||||||
private static boolean resolveAddress = false;
|
private static boolean resolveAddress = false;
|
||||||
|
|
||||||
|
@ -92,6 +92,21 @@ public class InetSocketTransportAddress implements TransportAddress {
|
||||||
address.getAddress().equals(((InetSocketTransportAddress) other).address.getAddress());
|
address.getAddress().equals(((InetSocketTransportAddress) other).address.getAddress());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getHost() {
|
||||||
|
return address.getHostName();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String getAddress() {
|
||||||
|
return address.getAddress().getHostAddress();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getPort() {
|
||||||
|
return address.getPort();
|
||||||
|
}
|
||||||
|
|
||||||
public InetSocketAddress address() {
|
public InetSocketAddress address() {
|
||||||
return this.address;
|
return this.address;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -29,7 +29,7 @@ import java.io.IOException;
 /**
  *
  */
-public class LocalTransportAddress implements TransportAddress {
+public final class LocalTransportAddress implements TransportAddress {
 
     public static final LocalTransportAddress PROTO = new LocalTransportAddress("_na");
 
@@ -57,6 +57,21 @@ public class LocalTransportAddress implements TransportAddress {
         return other instanceof LocalTransportAddress && id.equals(((LocalTransportAddress) other).id);
     }
+
+    @Override
+    public String getHost() {
+        return "local";
+    }
+
+    @Override
+    public String getAddress() {
+        return "0.0.0.0"; // see https://en.wikipedia.org/wiki/0.0.0.0
+    }
+
+    @Override
+    public int getPort() {
+        return 0;
+    }
 
     @Override
     public LocalTransportAddress readFrom(StreamInput in) throws IOException {
         return new LocalTransportAddress(in);
@@ -28,7 +28,24 @@ import org.elasticsearch.common.io.stream.Writeable;
  */
 public interface TransportAddress extends Writeable<TransportAddress> {
 
+    /**
+     * Returns the host string for this transport address
+     */
+    String getHost();
+
+    /**
+     * Returns the address string for this transport address
+     */
+    String getAddress();
+
+    /**
+     * Returns the port of this transport address if applicable
+     */
+    int getPort();
+
     short uniqueAddressTypeId();
 
     boolean sameHost(TransportAddress other);
+
+    public String toString();
 }
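Note: the three accessors added to TransportAddress give callers one uniform way to read host, address and port regardless of the concrete implementation. A minimal standalone sketch of a consumer written against that shape; the AddressInfo interface and the format helper below are illustrative stand-ins, not code from this commit.

// Standalone sketch: a consumer that only relies on the three new accessors.
// AddressInfo mirrors the getHost/getAddress/getPort methods added to TransportAddress
// above; it is redeclared locally so the example compiles on its own.
public class TransportAddressFormatExample {

    interface AddressInfo {
        String getHost();
        String getAddress();
        int getPort();
    }

    static String format(AddressInfo address) {
        // e.g. "dummy/0.0.0.0:42" for the DummyTransportAddress values shown in the diff
        return address.getHost() + "/" + address.getAddress() + ":" + address.getPort();
    }

    public static void main(String[] args) {
        AddressInfo dummy = new AddressInfo() {
            @Override public String getHost() { return "dummy"; }
            @Override public String getAddress() { return "0.0.0.0"; }
            @Override public int getPort() { return 42; }
        };
        System.out.println(format(dummy)); // prints: dummy/0.0.0.0:42
    }
}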
@@ -131,13 +131,16 @@ public abstract class ExtensionPoint {
      * the settings object.
      *
      * @param binder the binder to use
-     * @param settings the settings to look up the key to find the implemetation to bind
+     * @param settings the settings to look up the key to find the implementation to bind
      * @param settingsKey the key to use with the settings
-     * @param defaultValue the default value if they settings doesn't contain the key
+     * @param defaultValue the default value if the settings do not contain the key, or null if there is no default
      * @return the actual bound type key
      */
     public String bindType(Binder binder, Settings settings, String settingsKey, String defaultValue) {
         final String type = settings.get(settingsKey, defaultValue);
+        if (type == null) {
+            throw new IllegalArgumentException("Missing setting [" + settingsKey + "]");
+        }
         final Class<? extends T> instance = getExtension(type);
         if (instance == null) {
             throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "]");
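Note: the new guard in bindType fails fast when a key has neither a configured value nor a default. A rough standalone sketch of that lookup rule, using a plain Map in place of Settings; the key names below are illustrative.

// Standalone sketch of the fail-fast lookup the new bindType guard implements:
// read a key from a settings map, fall back to a default, and throw if neither is present.
import java.util.HashMap;
import java.util.Map;

public class BindTypeLookupExample {

    static String resolveType(Map<String, String> settings, String settingsKey, String defaultValue) {
        String type = settings.getOrDefault(settingsKey, defaultValue);
        if (type == null) {
            // mirrors: throw new IllegalArgumentException("Missing setting [" + settingsKey + "]")
            throw new IllegalArgumentException("Missing setting [" + settingsKey + "]");
        }
        return type;
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        System.out.println(resolveType(settings, "discovery.type", "zen")); // falls back to the default
        try {
            resolveType(settings, "discovery.type", null);                  // no value and no default
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());                             // Missing setting [discovery.type]
        }
    }
}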
@@ -131,7 +131,9 @@ public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implem
             boolean deferToInterface = settings.getAsBoolean("discovery.zen.ping.multicast.defer_group_to_set_interface", Constants.MAC_OS_X);
             multicastChannel = MulticastChannel.getChannel(nodeName(), shared,
                     new MulticastChannel.Config(port, group, bufferSize, ttl,
-                            networkService.resolvePublishHostAddress(address),
+                            // don't use publish address, the use case for that is e.g. a firewall or proxy and
+                            // may not even be bound to an interface on this machine! use the first bound address.
+                            networkService.resolveBindHostAddress(address)[0],
                             deferToInterface),
                     new Receiver());
         } catch (Throwable t) {
@@ -51,6 +51,10 @@ import org.jboss.netty.handler.timeout.ReadTimeoutException;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -128,7 +132,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
 
     protected volatile BoundTransportAddress boundAddress;
 
-    protected volatile Channel serverChannel;
+    protected volatile List<Channel> serverChannels = new ArrayList<>();
 
     protected OpenChannelsHandler serverOpenChannels;
 
@@ -243,33 +247,18 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
         serverBootstrap.setOption("child.reuseAddress", reuseAddress);
 
         // Bind and start to accept incoming connections.
-        InetAddress hostAddressX;
+        InetAddress hostAddresses[];
         try {
-            hostAddressX = networkService.resolveBindHostAddress(bindHost);
+            hostAddresses = networkService.resolveBindHostAddress(bindHost);
         } catch (IOException e) {
             throw new BindHttpException("Failed to resolve host [" + bindHost + "]", e);
         }
-        final InetAddress hostAddress = hostAddressX;
-        PortsRange portsRange = new PortsRange(port);
-        final AtomicReference<Exception> lastException = new AtomicReference<>();
-        boolean success = portsRange.iterate(new PortsRange.PortCallback() {
-            @Override
-            public boolean onPortNumber(int portNumber) {
-                try {
-                    serverChannel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
-                } catch (Exception e) {
-                    lastException.set(e);
-                    return false;
-                }
-                return true;
-            }
-        });
-        if (!success) {
-            throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
+
+        for (InetAddress address : hostAddresses) {
+            bindAddress(address);
         }
 
-        InetSocketAddress boundAddress = (InetSocketAddress) serverChannel.getLocalAddress();
+        InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(0).getLocalAddress();
         InetSocketAddress publishAddress;
         if (0 == publishPort) {
             publishPort = boundAddress.getPort();
@@ -281,12 +270,42 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
         }
         this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
     }
 
+    private void bindAddress(final InetAddress hostAddress) {
+        PortsRange portsRange = new PortsRange(port);
+        final AtomicReference<Exception> lastException = new AtomicReference<>();
+        final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
+        boolean success = portsRange.iterate(new PortsRange.PortCallback() {
+            @Override
+            public boolean onPortNumber(int portNumber) {
+                try {
+                    synchronized (serverChannels) {
+                        Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
+                        serverChannels.add(channel);
+                        boundSocket.set(channel.getLocalAddress());
+                    }
+                } catch (Exception e) {
+                    lastException.set(e);
+                    return false;
+                }
+                return true;
+            }
+        });
+        if (!success) {
+            throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
+        }
+        logger.info("Bound http to address [{}]", boundSocket.get());
+    }
+
     @Override
     protected void doStop() {
-        if (serverChannel != null) {
-            serverChannel.close().awaitUninterruptibly();
-            serverChannel = null;
+        synchronized (serverChannels) {
+            if (serverChannels != null) {
+                for (Channel channel : serverChannels) {
+                    channel.close().awaitUninterruptibly();
+                }
+                serverChannels = null;
+            }
         }
 
         if (serverOpenChannels != null) {
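Note: the rewritten HTTP transport start-up resolves every configured bind host and then walks the port range once per address, remembering the last failure for the error message. A simplified standalone sketch of that per-address loop, using java.net.ServerSocket in place of the Netty bootstrap; the 9200-9205 range is just an example value.

// Standalone sketch of the bind loop: for one resolved address, try each port in a range,
// keep the last failure, and stop at the first port that binds.
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class PortRangeBindExample {

    static ServerSocket bindFirstFreePort(InetAddress address, int fromPort, int toPort) throws IOException {
        IOException lastException = null;
        for (int port = fromPort; port <= toPort; port++) {
            ServerSocket socket = new ServerSocket();
            try {
                socket.bind(new InetSocketAddress(address, port));
                return socket; // first successful bind wins, as in the PortsRange callback
            } catch (IOException e) {
                lastException = e;
                socket.close();
            }
        }
        throw new IOException("Failed to bind to [" + fromPort + "-" + toPort + "]", lastException);
    }

    public static void main(String[] args) throws IOException {
        ServerSocket socket = bindFirstFreePort(InetAddress.getLoopbackAddress(), 9200, 9205);
        System.out.println("Bound to " + socket.getLocalSocketAddress());
        socket.close();
    }
}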
@@ -36,10 +36,12 @@ public interface IndexSearcherWrapper {
     DirectoryReader wrap(DirectoryReader reader);
 
     /**
-     * @param searcher The provided index searcher to be wrapped to add custom functionality
+     * @param engineConfig The engine config which can be used to get the query cache and query cache policy from
+     *                     when creating a new index searcher
+     * @param searcher The provided index searcher to be wrapped to add custom functionality
      * @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
      *         the provided index searcher
      */
-    IndexSearcher wrap(IndexSearcher searcher) throws EngineException;
+    IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException;
 
 }
@@ -77,7 +77,7 @@ public final class IndexSearcherWrappingService {
         // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point
         // For example if IndexSearcher#rewrite() is overwritten than also IndexSearcher#createNormalizedWeight needs to be overwritten
         // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
-        IndexSearcher indexSearcher = wrapper.wrap(innerIndexSearcher);
+        IndexSearcher indexSearcher = wrapper.wrap(engineConfig, innerIndexSearcher);
         if (reader == engineSearcher.reader() && indexSearcher == innerIndexSearcher) {
             return engineSearcher;
         } else {
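Note: wrap now receives the EngineConfig as well, so an implementation can consult engine-level settings such as the query cache before deciding whether to decorate the searcher. A standalone sketch of that idea with local stand-in types; EngineConfig and Searcher here are simplified placeholders, not the Elasticsearch classes.

// Standalone sketch of the wrapper contract after this change: the wrap method takes the
// engine configuration alongside the searcher it may decorate.
public class SearcherWrapperExample {

    static class EngineConfig {
        final boolean queryCacheEnabled;
        EngineConfig(boolean queryCacheEnabled) { this.queryCacheEnabled = queryCacheEnabled; }
    }

    static class Searcher {
        final String description;
        Searcher(String description) { this.description = description; }
    }

    interface SearcherWrapper {
        // mirrors: IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher)
        Searcher wrap(EngineConfig engineConfig, Searcher searcher);
    }

    public static void main(String[] args) {
        SearcherWrapper wrapper = (engineConfig, searcher) ->
                engineConfig.queryCacheEnabled
                        ? new Searcher(searcher.description + " [caching decorator]")
                        : searcher; // returning the input unchanged means "no wrapping was performed"
        System.out.println(wrapper.wrap(new EngineConfig(true), new Searcher("base")).description);
    }
}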
@@ -101,8 +101,7 @@ public class DocumentMapperParser {
                 .put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser())
                 .put(TypeParsers.MULTI_FIELD_CONTENT_TYPE, TypeParsers.multiFieldConverterTypeParser)
                 .put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser())
-                .put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser())
-                .put(Murmur3FieldMapper.CONTENT_TYPE, new Murmur3FieldMapper.TypeParser());
+                .put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
 
         if (ShapesAvailability.JTS_AVAILABLE) {
             typeParsersBuilder.put(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
@@ -84,10 +84,6 @@ public final class MapperBuilders {
         return new LongFieldMapper.Builder(name);
     }
 
-    public static Murmur3FieldMapper.Builder murmur3Field(String name) {
-        return new Murmur3FieldMapper.Builder(name);
-    }
-
     public static FloatFieldMapper.Builder floatField(String name) {
         return new FloatFieldMapper.Builder(name);
     }
@@ -85,6 +85,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
         public static final String LON_SUFFIX = "." + LON;
         public static final String GEOHASH = "geohash";
         public static final String GEOHASH_SUFFIX = "." + GEOHASH;
+        public static final String IGNORE_MALFORMED = "ignore_malformed";
+        public static final String COERCE = "coerce";
     }
 
     public static class Defaults {
@@ -93,10 +95,9 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
         public static final boolean ENABLE_GEOHASH = false;
         public static final boolean ENABLE_GEOHASH_PREFIX = false;
         public static final int GEO_HASH_PRECISION = GeoHashUtils.PRECISION;
-        public static final boolean NORMALIZE_LAT = true;
-        public static final boolean NORMALIZE_LON = true;
-        public static final boolean VALIDATE_LAT = true;
-        public static final boolean VALIDATE_LON = true;
+        public static final boolean IGNORE_MALFORMED = false;
+        public static final boolean COERCE = false;
 
         public static final MappedFieldType FIELD_TYPE = new GeoPointFieldType();
 
@@ -215,6 +216,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
         @Override
         public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             Builder builder = geoPointField(name);
+            final boolean indexCreatedBeforeV2_0 = parserContext.indexVersionCreated().before(Version.V_2_0_0);
             parseField(builder, name, node, parserContext);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
@@ -245,25 +247,42 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
                         builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(fieldNode.toString()));
                     }
                     iterator.remove();
-                } else if (fieldName.equals("validate")) {
-                    builder.fieldType().setValidateLat(XContentMapValues.nodeBooleanValue(fieldNode));
-                    builder.fieldType().setValidateLon(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (fieldName.equals(Names.IGNORE_MALFORMED)) {
+                    if (builder.fieldType().coerce == false) {
+                        builder.fieldType().ignoreMalformed = XContentMapValues.nodeBooleanValue(fieldNode);
+                    }
                     iterator.remove();
-                } else if (fieldName.equals("validate_lon")) {
-                    builder.fieldType().setValidateLon(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("validate")) {
+                    if (builder.fieldType().ignoreMalformed == false) {
+                        builder.fieldType().ignoreMalformed = !XContentMapValues.nodeBooleanValue(fieldNode);
+                    }
+                    iterator.remove();
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("validate_lon")) {
+                    if (builder.fieldType().ignoreMalformed() == false) {
+                        builder.fieldType().ignoreMalformed = !XContentMapValues.nodeBooleanValue(fieldNode);
+                    }
                     iterator.remove();
-                } else if (fieldName.equals("validate_lat")) {
-                    builder.fieldType().setValidateLat(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("validate_lat")) {
+                    if (builder.fieldType().ignoreMalformed == false) {
+                        builder.fieldType().ignoreMalformed = !XContentMapValues.nodeBooleanValue(fieldNode);
+                    }
                     iterator.remove();
-                } else if (fieldName.equals("normalize")) {
-                    builder.fieldType().setNormalizeLat(XContentMapValues.nodeBooleanValue(fieldNode));
-                    builder.fieldType().setNormalizeLon(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (fieldName.equals(Names.COERCE)) {
+                    builder.fieldType().coerce = XContentMapValues.nodeBooleanValue(fieldNode);
+                    if (builder.fieldType().coerce == true) {
+                        builder.fieldType().ignoreMalformed = true;
+                    }
                     iterator.remove();
-                } else if (fieldName.equals("normalize_lat")) {
-                    builder.fieldType().setNormalizeLat(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("normalize")) {
+                    builder.fieldType().coerce = XContentMapValues.nodeBooleanValue(fieldNode);
                     iterator.remove();
-                } else if (fieldName.equals("normalize_lon")) {
-                    builder.fieldType().setNormalizeLon(XContentMapValues.nodeBooleanValue(fieldNode));
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("normalize_lat")) {
+                    builder.fieldType().coerce = XContentMapValues.nodeBooleanValue(fieldNode);
+                    iterator.remove();
+                } else if (indexCreatedBeforeV2_0 && fieldName.equals("normalize_lon")) {
+                    if (builder.fieldType().coerce == false) {
+                        builder.fieldType().coerce = XContentMapValues.nodeBooleanValue(fieldNode);
+                    }
                     iterator.remove();
                 } else if (parseMultiField(builder, name, parserContext, fieldName, fieldNode)) {
                     iterator.remove();
@@ -281,10 +300,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
 
         private MappedFieldType latFieldType;
         private MappedFieldType lonFieldType;
-        private boolean validateLon = true;
-        private boolean validateLat = true;
-        private boolean normalizeLon = true;
-        private boolean normalizeLat = true;
+        private boolean ignoreMalformed = false;
+        private boolean coerce = false;
 
         public GeoPointFieldType() {}
 
@@ -295,10 +312,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             this.geohashPrefixEnabled = ref.geohashPrefixEnabled;
             this.latFieldType = ref.latFieldType; // copying ref is ok, this can never be modified
             this.lonFieldType = ref.lonFieldType; // copying ref is ok, this can never be modified
-            this.validateLon = ref.validateLon;
-            this.validateLat = ref.validateLat;
-            this.normalizeLon = ref.normalizeLon;
-            this.normalizeLat = ref.normalizeLat;
+            this.coerce = ref.coerce;
+            this.ignoreMalformed = ref.ignoreMalformed;
         }
 
         @Override
@@ -312,10 +327,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             GeoPointFieldType that = (GeoPointFieldType) o;
             return geohashPrecision == that.geohashPrecision &&
                     geohashPrefixEnabled == that.geohashPrefixEnabled &&
-                    validateLon == that.validateLon &&
-                    validateLat == that.validateLat &&
-                    normalizeLon == that.normalizeLon &&
-                    normalizeLat == that.normalizeLat &&
+                    coerce == that.coerce &&
+                    ignoreMalformed == that.ignoreMalformed &&
                     java.util.Objects.equals(geohashFieldType, that.geohashFieldType) &&
                     java.util.Objects.equals(latFieldType, that.latFieldType) &&
                     java.util.Objects.equals(lonFieldType, that.lonFieldType);
@@ -323,7 +336,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
 
         @Override
         public int hashCode() {
-            return java.util.Objects.hash(super.hashCode(), geohashFieldType, geohashPrecision, geohashPrefixEnabled, latFieldType, lonFieldType, validateLon, validateLat, normalizeLon, normalizeLat);
+            return java.util.Objects.hash(super.hashCode(), geohashFieldType, geohashPrecision, geohashPrefixEnabled, latFieldType,
+                    lonFieldType, coerce, ignoreMalformed);
         }
 
         @Override
@@ -347,22 +361,10 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             if (isGeohashPrefixEnabled() != other.isGeohashPrefixEnabled()) {
                 conflicts.add("mapper [" + names().fullName() + "] has different geohash_prefix");
             }
-            if (normalizeLat() != other.normalizeLat()) {
-                conflicts.add("mapper [" + names().fullName() + "] has different normalize_lat");
-            }
-            if (normalizeLon() != other.normalizeLon()) {
-                conflicts.add("mapper [" + names().fullName() + "] has different normalize_lon");
-            }
-            if (isLatLonEnabled() &&
+            if (isLatLonEnabled() && other.isLatLonEnabled() &&
                     latFieldType().numericPrecisionStep() != other.latFieldType().numericPrecisionStep()) {
                 conflicts.add("mapper [" + names().fullName() + "] has different precision_step");
             }
-            if (validateLat() != other.validateLat()) {
-                conflicts.add("mapper [" + names().fullName() + "] has different validate_lat");
-            }
-            if (validateLon() != other.validateLon()) {
-                conflicts.add("mapper [" + names().fullName() + "] has different validate_lon");
-            }
         }
 
         public boolean isGeohashEnabled() {
@@ -406,40 +408,22 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             this.lonFieldType = lonFieldType;
         }
 
-        public boolean validateLon() {
-            return validateLon;
+        public boolean coerce() {
+            return this.coerce;
         }
 
-        public void setValidateLon(boolean validateLon) {
+        public void setCoerce(boolean coerce) {
             checkIfFrozen();
-            this.validateLon = validateLon;
+            this.coerce = coerce;
         }
 
-        public boolean validateLat() {
-            return validateLat;
+        public boolean ignoreMalformed() {
+            return this.ignoreMalformed;
         }
 
-        public void setValidateLat(boolean validateLat) {
+        public void setIgnoreMalformed(boolean ignoreMalformed) {
             checkIfFrozen();
-            this.validateLat = validateLat;
-        }
-
-        public boolean normalizeLon() {
-            return normalizeLon;
-        }
-
-        public void setNormalizeLon(boolean normalizeLon) {
-            checkIfFrozen();
-            this.normalizeLon = normalizeLon;
-        }
-
-        public boolean normalizeLat() {
-            return normalizeLat;
-        }
-
-        public void setNormalizeLat(boolean normalizeLat) {
-            checkIfFrozen();
-            this.normalizeLat = normalizeLat;
+            this.ignoreMalformed = ignoreMalformed;
         }
 
         @Override
@@ -586,7 +570,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     private final StringFieldMapper geohashMapper;
 
     public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-                               ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geohashMapper,MultiFields multiFields) {
+                               ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geohashMapper,
+                               MultiFields multiFields) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, null);
         this.pathType = pathType;
         this.latMapper = latMapper;
@@ -680,21 +665,22 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     }
 
     private void parse(ParseContext context, GeoPoint point, String geohash) throws IOException {
-        if (fieldType().normalizeLat() || fieldType().normalizeLon()) {
-            GeoUtils.normalizePoint(point, fieldType().normalizeLat(), fieldType().normalizeLon());
-        }
-
-        if (fieldType().validateLat()) {
+        if (fieldType().ignoreMalformed == false) {
             if (point.lat() > 90.0 || point.lat() < -90.0) {
                 throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
             }
-        }
-        if (fieldType().validateLon()) {
             if (point.lon() > 180.0 || point.lon() < -180) {
                 throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
             }
         }
 
+        if (fieldType().coerce) {
+            // by setting coerce to false we are assuming all geopoints are already in a valid coordinate system
+            // thus this extra step can be skipped
+            // LUCENE WATCH: This will be folded back into Lucene's GeoPointField
+            GeoUtils.normalizePoint(point, true, true);
+        }
+
         if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
             Field field = new Field(fieldType().names().indexName(), Double.toString(point.lat()) + ',' + Double.toString(point.lon()), fieldType());
             context.doc().add(field);
@@ -755,33 +741,11 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
         if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != NumericUtils.PRECISION_STEP_DEFAULT)) {
             builder.field("precision_step", fieldType().latFieldType().numericPrecisionStep());
         }
-        if (includeDefaults || fieldType().validateLat() != Defaults.VALIDATE_LAT || fieldType().validateLon() != Defaults.VALIDATE_LON) {
-            if (fieldType().validateLat() && fieldType().validateLon()) {
-                builder.field("validate", true);
-            } else if (!fieldType().validateLat() && !fieldType().validateLon()) {
-                builder.field("validate", false);
-            } else {
-                if (includeDefaults || fieldType().validateLat() != Defaults.VALIDATE_LAT) {
-                    builder.field("validate_lat", fieldType().validateLat());
-                }
-                if (includeDefaults || fieldType().validateLon() != Defaults.VALIDATE_LON) {
-                    builder.field("validate_lon", fieldType().validateLon());
-                }
-            }
+        if (includeDefaults || fieldType().coerce != Defaults.COERCE) {
+            builder.field(Names.COERCE, fieldType().coerce);
         }
-        if (includeDefaults || fieldType().normalizeLat() != Defaults.NORMALIZE_LAT || fieldType().normalizeLon() != Defaults.NORMALIZE_LON) {
-            if (fieldType().normalizeLat() && fieldType().normalizeLon()) {
-                builder.field("normalize", true);
-            } else if (!fieldType().normalizeLat() && !fieldType().normalizeLon()) {
-                builder.field("normalize", false);
-            } else {
-                if (includeDefaults || fieldType().normalizeLat() != Defaults.NORMALIZE_LAT) {
-                    builder.field("normalize_lat", fieldType().normalizeLat());
-                }
-                if (includeDefaults || fieldType().normalizeLon() != Defaults.NORMALIZE_LON) {
-                    builder.field("normalize_lon", fieldType().normalizeLon());
-                }
-            }
+        if (includeDefaults || fieldType().ignoreMalformed != Defaults.IGNORE_MALFORMED) {
+            builder.field(Names.IGNORE_MALFORMED, fieldType().ignoreMalformed);
         }
     }
 
@@ -812,5 +776,4 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
             return new BytesRef(bytes);
         }
     }
-
 }
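Note: the mapper now distinguishes ignore_malformed (skip the lat/lon range validation) from coerce (normalize out-of-range points back into range), and turning coerce on implies ignore_malformed. A rough standalone sketch of that decision flow; the Point class and the wrap-around arithmetic below are simplified stand-ins for GeoPoint and GeoUtils.normalizePoint, not the real implementations.

// Standalone sketch of the new geo_point parse semantics: out-of-range points are rejected
// unless ignore_malformed is set, and are pulled back into range only when coerce is set.
public class GeoPointCoerceExample {

    static class Point {
        double lat, lon;
        Point(double lat, double lon) { this.lat = lat; this.lon = lon; }
    }

    static void parse(Point point, boolean ignoreMalformed, boolean coerce) {
        if (!ignoreMalformed) {
            if (point.lat > 90.0 || point.lat < -90.0) {
                throw new IllegalArgumentException("illegal latitude value [" + point.lat + "]");
            }
            if (point.lon > 180.0 || point.lon < -180.0) {
                throw new IllegalArgumentException("illegal longitude value [" + point.lon + "]");
            }
        }
        if (coerce) {
            // crude longitude wrap-around and latitude clamp, standing in for GeoUtils.normalizePoint(point, true, true)
            point.lon = ((point.lon + 180.0) % 360.0 + 360.0) % 360.0 - 180.0;
            point.lat = Math.max(-90.0, Math.min(90.0, point.lat));
        }
    }

    public static void main(String[] args) {
        Point p = new Point(45.0, 200.0);
        parse(p, true, true);              // coerce implies ignore_malformed in the mapper
        System.out.println(p.lon);         // -160.0
        try {
            parse(new Point(45.0, 200.0), false, false);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // illegal longitude value [200.0]
        }
    }
}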
@@ -41,6 +41,8 @@ public class GeoBoundingBoxQueryBuilder extends QueryBuilder {
 
     private String queryName;
     private String type;
+    private Boolean coerce;
+    private Boolean ignoreMalformed;
 
     public GeoBoundingBoxQueryBuilder(String name) {
         this.name = name;
@@ -134,6 +136,16 @@ public class GeoBoundingBoxQueryBuilder extends QueryBuilder {
         return this;
     }
 
+    public GeoBoundingBoxQueryBuilder coerce(boolean coerce) {
+        this.coerce = coerce;
+        return this;
+    }
+
+    public GeoBoundingBoxQueryBuilder ignoreMalformed(boolean ignoreMalformed) {
+        this.ignoreMalformed = ignoreMalformed;
+        return this;
+    }
+
     /**
      * Sets the type of executing of the geo bounding box. Can be either `memory` or `indexed`. Defaults
      * to `memory`.
@@ -169,6 +181,12 @@ public class GeoBoundingBoxQueryBuilder extends QueryBuilder {
         if (type != null) {
             builder.field("type", type);
         }
+        if (coerce != null) {
+            builder.field("coerce", coerce);
+        }
+        if (ignoreMalformed != null) {
+            builder.field("ignore_malformed", ignoreMalformed);
+        }
 
         builder.endObject();
     }
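Note: coerce and ignore_malformed are held as Boolean rather than boolean in the query builders so that only values the caller set explicitly are serialized. A small standalone sketch of that tri-state pattern; MiniGeoQueryBuilder and its hand-rolled JSON output are illustrative, not Elasticsearch classes.

// Standalone sketch of the optional-flag pattern added to the geo query builders above.
public class OptionalFlagBuilderExample {

    static class MiniGeoQueryBuilder {
        private Boolean coerce;
        private Boolean ignoreMalformed;

        MiniGeoQueryBuilder coerce(boolean coerce) {
            this.coerce = coerce;
            return this;
        }

        MiniGeoQueryBuilder ignoreMalformed(boolean ignoreMalformed) {
            this.ignoreMalformed = ignoreMalformed;
            return this;
        }

        String toJson() {
            StringBuilder json = new StringBuilder("{");
            if (coerce != null) {
                json.append("\"coerce\":").append(coerce);
            }
            if (ignoreMalformed != null) {
                if (json.length() > 1) json.append(',');
                json.append("\"ignore_malformed\":").append(ignoreMalformed);
            }
            return json.append('}').toString();
        }
    }

    public static void main(String[] args) {
        System.out.println(new MiniGeoQueryBuilder().toJson());               // {}
        System.out.println(new MiniGeoQueryBuilder().coerce(true).toJson());  // {"coerce":true}
        System.out.println(new MiniGeoQueryBuilder().coerce(true).ignoreMalformed(false).toJson());
        // {"coerce":true,"ignore_malformed":false}
    }
}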
@@ -21,12 +21,12 @@ package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Query;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
-import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
 import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxQuery;
@@ -81,7 +81,9 @@ public class GeoBoundingBoxQueryParser implements QueryParser {
         String queryName = null;
         String currentFieldName = null;
         XContentParser.Token token;
-        boolean normalize = true;
+        final boolean indexCreatedBeforeV2_0 = parseContext.indexVersionCreated().before(Version.V_2_0_0);
+        boolean coerce = false;
+        boolean ignoreMalformed = false;
 
         GeoPoint sparse = new GeoPoint();
 
@@ -137,10 +139,15 @@ public class GeoBoundingBoxQueryParser implements QueryParser {
             } else if (token.isValue()) {
                 if ("_name".equals(currentFieldName)) {
                     queryName = parser.text();
-                } else if ("normalize".equals(currentFieldName)) {
-                    normalize = parser.booleanValue();
+                } else if ("coerce".equals(currentFieldName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentFieldName))) {
+                    coerce = parser.booleanValue();
+                    if (coerce == true) {
+                        ignoreMalformed = true;
+                    }
                 } else if ("type".equals(currentFieldName)) {
                     type = parser.text();
+                } else if ("ignore_malformed".equals(currentFieldName) && coerce == false) {
+                    ignoreMalformed = parser.booleanValue();
                 } else {
                     throw new QueryParsingException(parseContext, "failed to parse [{}] query. unexpected field [{}]", NAME, currentFieldName);
                 }
@@ -150,8 +157,24 @@ public class GeoBoundingBoxQueryParser implements QueryParser {
         final GeoPoint topLeft = sparse.reset(top, left);  //just keep the object
         final GeoPoint bottomRight = new GeoPoint(bottom, right);
 
-        if (normalize) {
-            // Special case: if the difference bettween the left and right is 360 and the right is greater than the left, we are asking for
+        // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
+        if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
+            if (topLeft.lat() > 90.0 || topLeft.lat() < -90.0) {
+                throw new QueryParsingException(parseContext, "illegal latitude value [{}] for [{}]", topLeft.lat(), NAME);
+            }
+            if (topLeft.lon() > 180.0 || topLeft.lon() < -180) {
+                throw new QueryParsingException(parseContext, "illegal longitude value [{}] for [{}]", topLeft.lon(), NAME);
+            }
+            if (bottomRight.lat() > 90.0 || bottomRight.lat() < -90.0) {
+                throw new QueryParsingException(parseContext, "illegal latitude value [{}] for [{}]", bottomRight.lat(), NAME);
+            }
+            if (bottomRight.lon() > 180.0 || bottomRight.lon() < -180) {
+                throw new QueryParsingException(parseContext, "illegal longitude value [{}] for [{}]", bottomRight.lon(), NAME);
+            }
+        }
+
+        if (coerce) {
+            // Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
             // the complete longitude range so need to set longitude to the complete longditude range
             boolean completeLonRange = ((right - left) % 360 == 0 && right > left);
             GeoUtils.normalizePoint(topLeft, true, !completeLonRange);
@@ -44,6 +44,10 @@ public class GeoDistanceQueryBuilder extends QueryBuilder {
 
     private String queryName;
 
+    private Boolean coerce;
+
+    private Boolean ignoreMalformed;
+
     public GeoDistanceQueryBuilder(String name) {
         this.name = name;
     }
@@ -97,6 +101,16 @@ public class GeoDistanceQueryBuilder extends QueryBuilder {
         return this;
     }
 
+    public GeoDistanceQueryBuilder coerce(boolean coerce) {
+        this.coerce = coerce;
+        return this;
+    }
+
+    public GeoDistanceQueryBuilder ignoreMalformed(boolean ignoreMalformed) {
+        this.ignoreMalformed = ignoreMalformed;
+        return this;
+    }
+
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(GeoDistanceQueryParser.NAME);
@@ -115,6 +129,12 @@ public class GeoDistanceQueryBuilder extends QueryBuilder {
         if (queryName != null) {
             builder.field("_name", queryName);
         }
+        if (coerce != null) {
+            builder.field("coerce", coerce);
+        }
+        if (ignoreMalformed != null) {
+            builder.field("ignore_malformed", ignoreMalformed);
+        }
         builder.endObject();
     }
 }
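Note: the geo parsers all apply the same precedence: "coerce" always applies, the legacy "normalize" name is honoured only for indexes created before 2.0, and an explicit "ignore_malformed" is ignored once coerce has already forced it on. A standalone sketch of that precedence; plain booleans stand in for the XContentParser state, and the method name is illustrative.

// Standalone sketch of the field handling added to the geo distance parsers.
public class GeoFlagParsingExample {

    static boolean[] parseFlags(String fieldName, boolean value, boolean indexCreatedBeforeV2_0,
                                boolean coerce, boolean ignoreMalformed) {
        if ("coerce".equals(fieldName) || (indexCreatedBeforeV2_0 && "normalize".equals(fieldName))) {
            coerce = value;
            if (coerce) {
                ignoreMalformed = true; // coerce implies ignore_malformed
            }
        } else if ("ignore_malformed".equals(fieldName) && coerce == false) {
            ignoreMalformed = value;
        }
        return new boolean[]{coerce, ignoreMalformed};
    }

    public static void main(String[] args) {
        // "normalize" is still accepted, but only for an index created before 2.0
        System.out.println(parseFlags("normalize", true, true, false, false)[0]);  // true
        System.out.println(parseFlags("normalize", true, false, false, false)[0]); // false
        // once coerce is on, an explicit ignore_malformed=false cannot switch validation back on
        System.out.println(parseFlags("ignore_malformed", false, false, true, true)[1]); // true
    }
}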
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.geo.GeoPoint;
@@ -28,7 +29,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
-import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
 import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@@ -71,8 +71,9 @@ public class GeoDistanceQueryParser implements QueryParser {
         DistanceUnit unit = DistanceUnit.DEFAULT;
         GeoDistance geoDistance = GeoDistance.DEFAULT;
         String optimizeBbox = "memory";
-        boolean normalizeLon = true;
-        boolean normalizeLat = true;
+        final boolean indexCreatedBeforeV2_0 = parseContext.indexVersionCreated().before(Version.V_2_0_0);
+        boolean coerce = false;
+        boolean ignoreMalformed = false;
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
@@ -125,9 +126,13 @@ public class GeoDistanceQueryParser implements QueryParser {
                     queryName = parser.text();
                 } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) {
                     optimizeBbox = parser.textOrNull();
-                } else if ("normalize".equals(currentFieldName)) {
-                    normalizeLat = parser.booleanValue();
-                    normalizeLon = parser.booleanValue();
+                } else if ("coerce".equals(currentFieldName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentFieldName))) {
+                    coerce = parser.booleanValue();
+                    if (coerce == true) {
+                        ignoreMalformed = true;
+                    }
+                } else if ("ignore_malformed".equals(currentFieldName) && coerce == false) {
+                    ignoreMalformed = parser.booleanValue();
                 } else {
                     point.resetFromString(parser.text());
                     fieldName = currentFieldName;
@@ -135,6 +140,20 @@ public class GeoDistanceQueryParser implements QueryParser {
             }
         }
 
+        // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
+        if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
+            if (point.lat() > 90.0 || point.lat() < -90.0) {
+                throw new QueryParsingException(parseContext, "illegal latitude value [{}] for [{}]", point.lat(), NAME);
+            }
+            if (point.lon() > 180.0 || point.lon() < -180) {
+                throw new QueryParsingException(parseContext, "illegal longitude value [{}] for [{}]", point.lon(), NAME);
+            }
+        }
+
+        if (coerce) {
+            GeoUtils.normalizePoint(point, coerce, coerce);
+        }
+
         if (vDistance == null) {
             throw new QueryParsingException(parseContext, "geo_distance requires 'distance' to be specified");
         } else if (vDistance instanceof Number) {
@@ -144,10 +163,6 @@ public class GeoDistanceQueryParser implements QueryParser {
         }
         distance = geoDistance.normalize(distance, DistanceUnit.DEFAULT);
 
-        if (normalizeLat || normalizeLon) {
-            GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
-        }
-
         MappedFieldType fieldType = parseContext.fieldMapper(fieldName);
         if (fieldType == null) {
             throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]");
@@ -46,6 +46,10 @@ public class GeoDistanceRangeQueryBuilder extends QueryBuilder {
 
     private String optimizeBbox;
 
+    private Boolean coerce;
+
+    private Boolean ignoreMalformed;
+
     public GeoDistanceRangeQueryBuilder(String name) {
         this.name = name;
     }
@@ -125,6 +129,16 @@ public class GeoDistanceRangeQueryBuilder extends QueryBuilder {
         return this;
     }
 
+    public GeoDistanceRangeQueryBuilder coerce(boolean coerce) {
+        this.coerce = coerce;
+        return this;
+    }
+
+    public GeoDistanceRangeQueryBuilder ignoreMalformed(boolean ignoreMalformed) {
+        this.ignoreMalformed = ignoreMalformed;
+        return this;
+    }
+
     /**
      * Sets the filter name for the filter that can be used when searching for matched_filters per hit.
      */
@@ -154,6 +168,12 @@ public class GeoDistanceRangeQueryBuilder extends QueryBuilder {
         if (queryName != null) {
             builder.field("_name", queryName);
         }
+        if (coerce != null) {
+            builder.field("coerce", coerce);
+        }
+        if (ignoreMalformed != null) {
+            builder.field("ignore_malformed", ignoreMalformed);
+        }
         builder.endObject();
     }
 }
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.geo.GeoPoint;
@@ -28,7 +29,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
-import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
 import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@@ -73,8 +73,9 @@ public class GeoDistanceRangeQueryParser implements QueryParser {
         DistanceUnit unit = DistanceUnit.DEFAULT;
         GeoDistance geoDistance = GeoDistance.DEFAULT;
         String optimizeBbox = "memory";
-        boolean normalizeLon = true;
-        boolean normalizeLat = true;
+        final boolean indexCreatedBeforeV2_0 = parseContext.indexVersionCreated().before(Version.V_2_0_0);
+        boolean coerce = false;
+        boolean ignoreMalformed = false;
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
@@ -155,9 +156,13 @@ public class GeoDistanceRangeQueryParser implements QueryParser {
                     queryName = parser.text();
                 } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) {
                     optimizeBbox = parser.textOrNull();
-                } else if ("normalize".equals(currentFieldName)) {
-                    normalizeLat = parser.booleanValue();
-                    normalizeLon = parser.booleanValue();
+                } else if ("coerce".equals(currentFieldName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentFieldName))) {
+                    coerce = parser.booleanValue();
+                    if (coerce == true) {
+                        ignoreMalformed = true;
+                    }
+                } else if ("ignore_malformed".equals(currentFieldName) && coerce == false) {
+                    ignoreMalformed = parser.booleanValue();
                 } else {
                     point.resetFromString(parser.text());
                     fieldName = currentFieldName;
@@ -165,6 +170,20 @@ public class GeoDistanceRangeQueryParser implements QueryParser {
             }
         }
 
+        // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
+        if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
+            if (point.lat() > 90.0 || point.lat() < -90.0) {
+                throw new QueryParsingException(parseContext, "illegal latitude value [{}] for [{}]", point.lat(), NAME);
+            }
+            if (point.lon() > 180.0 || point.lon() < -180) {
+                throw new QueryParsingException(parseContext, "illegal longitude value [{}] for [{}]", point.lon(), NAME);
+            }
+        }
+
+        if (coerce) {
+            GeoUtils.normalizePoint(point, coerce, coerce);
+        }
+
         Double from = null;
         Double to = null;
         if (vFrom != null) {
@@ -184,10 +203,6 @@ public class GeoDistanceRangeQueryParser implements QueryParser {
             to = geoDistance.normalize(to, DistanceUnit.DEFAULT);
         }
 
-        if (normalizeLat || normalizeLon) {
-            GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
-        }
-
         MappedFieldType fieldType = parseContext.fieldMapper(fieldName);
         if (fieldType == null) {
             throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]");
@@ -38,6 +38,10 @@ public class GeoPolygonQueryBuilder extends QueryBuilder {
 
     private String queryName;
 
+    private Boolean coerce;
+
+    private Boolean ignoreMalformed;
+
     public GeoPolygonQueryBuilder(String name) {
         this.name = name;
     }
@@ -70,6 +74,16 @@ public class GeoPolygonQueryBuilder extends QueryBuilder {
         return this;
     }
 
+    public GeoPolygonQueryBuilder coerce(boolean coerce) {
+        this.coerce = coerce;
+        return this;
+    }
+
+    public GeoPolygonQueryBuilder ignoreMalformed(boolean ignoreMalformed) {
+        this.ignoreMalformed = ignoreMalformed;
+        return this;
+    }
+
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(GeoPolygonQueryParser.NAME);
@@ -85,6 +99,12 @@ public class GeoPolygonQueryBuilder extends QueryBuilder {
         if (queryName != null) {
             builder.field("_name", queryName);
         }
+        if (coerce != null) {
+            builder.field("coerce", coerce);
+        }
+        if (ignoreMalformed != null) {
+            builder.field("ignore_malformed", ignoreMalformed);
+        }
 
         builder.endObject();
     }
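Note: for geo_polygon the same range checks run over every point of the shell before any coercion. A standalone sketch of the shell validation step; double[] {lat, lon} pairs stand in for GeoPoint instances and the class name is illustrative.

// Standalone sketch of the polygon-shell handling added above: every point of the shell is
// range-checked unless ignore_malformed is set.
import java.util.Arrays;
import java.util.List;

public class PolygonShellCheckExample {

    static void checkShell(List<double[]> shell, boolean ignoreMalformed) {
        if (!ignoreMalformed) {
            for (double[] point : shell) {
                if (point[0] > 90.0 || point[0] < -90.0) {
                    throw new IllegalArgumentException("illegal latitude value [" + point[0] + "]");
                }
                if (point[1] > 180.0 || point[1] < -180.0) {
                    throw new IllegalArgumentException("illegal longitude value [" + point[1] + "]");
                }
            }
        }
    }

    public static void main(String[] args) {
        List<double[]> shell = Arrays.asList(
                new double[]{40.73, -74.1},
                new double[]{40.83, -75.1},
                new double[]{91.0, -73.1}   // latitude out of range
        );
        checkShell(shell, true);            // accepted when ignore_malformed is set
        try {
            checkShell(shell, false);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // illegal latitude value [91.0]
        }
    }
}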
@ -22,13 +22,13 @@ package org.elasticsearch.index.query;
|
||||||
import com.google.common.collect.Lists;
|
import com.google.common.collect.Lists;
|
||||||
|
|
||||||
import org.apache.lucene.search.Query;
|
import org.apache.lucene.search.Query;
|
||||||
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.common.geo.GeoPoint;
|
import org.elasticsearch.common.geo.GeoPoint;
|
||||||
import org.elasticsearch.common.geo.GeoUtils;
|
import org.elasticsearch.common.geo.GeoUtils;
|
||||||
import org.elasticsearch.common.inject.Inject;
|
import org.elasticsearch.common.inject.Inject;
|
||||||
import org.elasticsearch.common.xcontent.XContentParser;
|
import org.elasticsearch.common.xcontent.XContentParser;
|
||||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||||
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
|
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
|
||||||
import org.elasticsearch.index.mapper.FieldMapper;
|
|
||||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||||
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
|
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
|
||||||
import org.elasticsearch.index.search.geo.GeoPolygonQuery;
|
import org.elasticsearch.index.search.geo.GeoPolygonQuery;
|
||||||
|
@ -70,9 +70,9 @@ public class GeoPolygonQueryParser implements QueryParser {
|
||||||
|
|
||||||
List<GeoPoint> shell = Lists.newArrayList();
|
List<GeoPoint> shell = Lists.newArrayList();
|
||||||
|
|
||||||
boolean normalizeLon = true;
|
final boolean indexCreatedBeforeV2_0 = parseContext.indexVersionCreated().before(Version.V_2_0_0);
|
||||||
boolean normalizeLat = true;
|
boolean coerce = false;
|
||||||
|
boolean ignoreMalformed = false;
|
||||||
String queryName = null;
|
String queryName = null;
|
||||||
String currentFieldName = null;
|
String currentFieldName = null;
|
||||||
XContentParser.Token token;
|
XContentParser.Token token;
|
||||||
|
@ -108,9 +108,13 @@ public class GeoPolygonQueryParser implements QueryParser {
|
||||||
} else if (token.isValue()) {
|
} else if (token.isValue()) {
|
||||||
if ("_name".equals(currentFieldName)) {
|
if ("_name".equals(currentFieldName)) {
|
||||||
queryName = parser.text();
|
queryName = parser.text();
|
||||||
} else if ("normalize".equals(currentFieldName)) {
|
} else if ("coerce".equals(currentFieldName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentFieldName))) {
|
||||||
normalizeLat = parser.booleanValue();
|
coerce = parser.booleanValue();
|
||||||
normalizeLon = parser.booleanValue();
|
if (coerce == true) {
|
||||||
|
ignoreMalformed = true;
|
||||||
|
}
|
||||||
|
} else if ("ignore_malformed".equals(currentFieldName) && coerce == false) {
|
||||||
|
ignoreMalformed = parser.booleanValue();
|
||||||
} else {
|
} else {
|
||||||
throw new QueryParsingException(parseContext, "[geo_polygon] query does not support [" + currentFieldName + "]");
|
throw new QueryParsingException(parseContext, "[geo_polygon] query does not support [" + currentFieldName + "]");
|
||||||
}
|
}
|
||||||
|
@ -134,9 +138,21 @@ public class GeoPolygonQueryParser implements QueryParser {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (normalizeLat || normalizeLon) {
|
// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
|
||||||
|
if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
|
||||||
for (GeoPoint point : shell) {
|
for (GeoPoint point : shell) {
|
||||||
GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
|
if (point.lat() > 90.0 || point.lat() < -90.0) {
|
||||||
|
throw new QueryParsingException(parseContext, "illegal latitude value [{}] for [{}]", point.lat(), NAME);
|
||||||
|
}
|
||||||
|
if (point.lon() > 180.0 || point.lon() < -180) {
|
||||||
|
throw new QueryParsingException(parseContext, "illegal longitude value [{}] for [{}]", point.lon(), NAME);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (coerce) {
|
||||||
|
for (GeoPoint point : shell) {
|
||||||
|
GeoUtils.normalizePoint(point, coerce, coerce);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
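The change above replaces the old `normalize` flag of the `geo_polygon` query with `coerce` (which also switches on `ignore_malformed`) and keeps `normalize` only as a legacy alias on indices created before 2.0. A minimal sketch of how a client could set the new flag through the updated builder; the `location` field and the polygon points are placeholder assumptions:

<pre>
import org.elasticsearch.index.query.GeoPolygonQueryBuilder;

public class GeoPolygonCoerceExample {
    public static GeoPolygonQueryBuilder locationPolygon() {
        // "location" is a hypothetical geo_point field
        GeoPolygonQueryBuilder query = new GeoPolygonQueryBuilder("location")
                .addPoint(40.0, -70.5)
                .addPoint(41.0, -72.0)
                .addPoint(39.0, -74.0);
        // bring slightly out-of-range points back into range instead of rejecting the request;
        // per the parser above, coerce=true also implies ignore_malformed=true
        return query.coerce(true);
    }
}
</pre>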
@@ -258,6 +258,7 @@ public class HasChildQueryParser implements QueryParser {
             String joinField = ParentFieldMapper.joinField(parentType);
             IndexReader indexReader = searchContext.searcher().getIndexReader();
             IndexSearcher indexSearcher = new IndexSearcher(indexReader);
+            indexSearcher.setQueryCache(null);
             IndexParentChildFieldData indexParentChildFieldData = parentChildIndexFieldData.loadGlobal(indexReader);
             MultiDocValues.OrdinalMap ordinalMap = ParentChildIndexFieldData.getOrdinalMap(indexParentChildFieldData, parentType);
             return JoinUtil.createJoinQuery(joinField, innerQuery, toQuery, indexSearcher, scoreMode, ordinalMap, minChildren, maxChildren);

@@ -68,10 +68,6 @@ public class NotQueryParser implements QueryParser {
                     // its the filter, and the name is the field
                     query = parseContext.parseInnerFilter(currentFieldName);
                 }
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                queryFound = true;
-                // its the filter, and the name is the field
-                query = parseContext.parseInnerFilter(currentFieldName);
             } else if (token.isValue()) {
                 if ("_name".equals(currentFieldName)) {
                     queryName = parser.text();
@@ -38,8 +38,6 @@ public class TermsQueryBuilder extends QueryBuilder implements BoostableQueryBui
 
     private String queryName;
 
-    private String execution;
-
     private float boost = -1;
 
     /**

@@ -118,17 +116,6 @@ public class TermsQueryBuilder extends QueryBuilder implements BoostableQueryBui
         this.values = values;
     }
 
-    /**
-     * Sets the execution mode for the terms filter. Cane be either "plain", "bool"
-     * "and". Defaults to "plain".
-     * @deprecated elasticsearch now makes better decisions on its own
-     */
-    @Deprecated
-    public TermsQueryBuilder execution(String execution) {
-        this.execution = execution;
-        return this;
-    }
-
     /**
      * Sets the minimum number of matches across the provided terms. Defaults to <tt>1</tt>.
      * @deprecated use [bool] query instead

@@ -168,10 +155,6 @@ public class TermsQueryBuilder extends QueryBuilder implements BoostableQueryBui
         builder.startObject(TermsQueryParser.NAME);
         builder.field(name, values);
 
-        if (execution != null) {
-            builder.field("execution", execution);
-        }
-
         if (minimumShouldMatch != null) {
             builder.field("minimum_should_match", minimumShouldMatch);
         }

@@ -52,11 +52,9 @@ public class TermsQueryParser implements QueryParser {
     public static final String NAME = "terms";
     private static final ParseField MIN_SHOULD_MATCH_FIELD = new ParseField("min_match", "min_should_match").withAllDeprecated("Use [bool] query instead");
     private static final ParseField DISABLE_COORD_FIELD = new ParseField("disable_coord").withAllDeprecated("Use [bool] query instead");
+    private static final ParseField EXECUTION_FIELD = new ParseField("execution").withAllDeprecated("execution is deprecated and has no effect");
     private Client client;
 
-    @Deprecated
-    public static final String EXECUTION_KEY = "execution";
-
     @Inject
     public TermsQueryParser() {
     }

@@ -141,7 +139,7 @@ public class TermsQueryParser implements QueryParser {
                     throw new QueryParsingException(parseContext, "[terms] query lookup element requires specifying the path");
                 }
             } else if (token.isValue()) {
-                if (EXECUTION_KEY.equals(currentFieldName)) {
+                if (parseContext.parseFieldMatcher().match(currentFieldName, EXECUTION_FIELD)) {
                     // ignore
                 } else if (parseContext.parseFieldMatcher().match(currentFieldName, MIN_SHOULD_MATCH_FIELD)) {
                     if (minShouldMatch != null) {
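With `execution` removed from the `terms` query (the parser above now accepts it only as a deprecated no-op), client code simply builds the query and lets Elasticsearch choose the execution strategy. A short sketch using the existing `QueryBuilders` helper; the field and values are placeholders:

<pre>
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermsQueryBuilder;

public class TermsQueryExample {
    public static TermsQueryBuilder tagsQuery() {
        // no execution("plain"/"bool"/"and") hint anymore; the option is deprecated and has no effect
        return QueryBuilders.termsQuery("tags", "elasticsearch", "search");
    }
}
</pre>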
@@ -24,7 +24,6 @@ import org.elasticsearch.action.update.UpdateHelper;
 import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
 import org.elasticsearch.common.geo.ShapesAvailability;
 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.multibindings.MapBinder;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ExtensionPoint;
 import org.elasticsearch.index.query.*;

@@ -38,6 +37,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
 import org.elasticsearch.indices.flush.SyncedFlushService;
 import org.elasticsearch.indices.memory.IndexingMemoryController;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.recovery.RecoverySource;
 import org.elasticsearch.indices.recovery.RecoveryTarget;

@@ -45,10 +45,6 @@ import org.elasticsearch.indices.store.IndicesStore;
 import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
 import org.elasticsearch.indices.ttl.IndicesTTLService;
 
-import java.security.cert.Extension;
-import java.util.HashMap;
-import java.util.Map;
-
 /**
  * Configures classes and services that are shared by indices on each node.
  */

@@ -159,6 +155,7 @@ public class IndicesModule extends AbstractModule {
 
     protected void bindQueryParsersExtension() {
         queryParsers.bind(binder());
+        bind(IndicesQueriesRegistry.class).asEagerSingleton();
     }
 
     protected void bindHunspellExtension() {
@@ -75,18 +75,19 @@ public class PluginManager {
 
     static final ImmutableSet<String> OFFICIAL_PLUGINS = ImmutableSet.<String>builder()
             .add(
-                    "elasticsearch-analysis-icu",
-                    "elasticsearch-analysis-kuromoji",
-                    "elasticsearch-analysis-phonetic",
-                    "elasticsearch-analysis-smartcn",
-                    "elasticsearch-analysis-stempel",
-                    "elasticsearch-cloud-aws",
-                    "elasticsearch-cloud-azure",
-                    "elasticsearch-cloud-gce",
-                    "elasticsearch-delete-by-query",
-                    "elasticsearch-lang-javascript",
-                    "elasticsearch-lang-python",
-                    "elasticsearch-mapper-size"
+                    "analysis-icu",
+                    "analysis-kuromoji",
+                    "analysis-phonetic",
+                    "analysis-smartcn",
+                    "analysis-stempel",
+                    "cloud-aws",
+                    "cloud-azure",
+                    "cloud-gce",
+                    "delete-by-query",
+                    "lang-javascript",
+                    "lang-python",
+                    "mapper-murmur3",
+                    "mapper-size"
             ).build();
 
     private final Environment environment;

@@ -162,7 +163,7 @@ public class PluginManager {
                 terminal.println("Failed: %s", ExceptionsHelper.detailedMessage(e));
             }
         } else {
-            if (PluginHandle.isOfficialPlugin(pluginHandle.repo, pluginHandle.user, pluginHandle.version)) {
+            if (PluginHandle.isOfficialPlugin(pluginHandle.name, pluginHandle.user, pluginHandle.version)) {
                 checkForOfficialPlugins(pluginHandle.name);
             }
         }

@@ -437,43 +438,41 @@ public class PluginManager {
     */
    static class PluginHandle {

-       final String name;
        final String version;
        final String user;
-       final String repo;
+       final String name;

-       PluginHandle(String name, String version, String user, String repo) {
-           this.name = name;
+       PluginHandle(String name, String version, String user) {
            this.version = version;
            this.user = user;
-           this.repo = repo;
+           this.name = name;
        }

        List<URL> urls() {
            List<URL> urls = new ArrayList<>();
            if (version != null) {
-               // Elasticsearch new download service uses groupId org.elasticsearch.plugins from 2.0.0
+               // Elasticsearch new download service uses groupId org.elasticsearch.plugin from 2.0.0
                if (user == null) {
                    // TODO Update to https
                    if (!Strings.isNullOrEmpty(System.getProperty(PROPERTY_SUPPORT_STAGING_URLS))) {
-                       addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/elasticsearch-%s-%s/org/elasticsearch/plugin/elasticsearch-%s/%s/elasticsearch-%s-%s.zip", version, Build.CURRENT.hashShort(), repo, version, repo, version));
+                       addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.hashShort(), name, version, name, version));
                    }
-                   addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/elasticsearch-%s/%s/elasticsearch-%s-%s.zip", repo, version, repo, version));
+                   addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version));
                } else {
                    // Elasticsearch old download service
                    // TODO Update to https
-                   addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, repo, version));
+                   addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version));
                    // Maven central repository
-                   addUrl(urls, String.format(Locale.ROOT, "http://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), repo, version));
+                   addUrl(urls, String.format(Locale.ROOT, "http://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
                    // Sonatype repository
-                   addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), repo, version));
+                   addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
                    // Github repository
-                   addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/%3$s.zip", user, repo, version));
+                   addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/%3$s.zip", user, name, version));
                }
            }
            if (user != null) {
                // Github repository for master branch (assume site)
-               addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/master.zip", user, repo));
+               addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/master.zip", user, name));
            }
            return urls;
        }

@@ -525,20 +524,11 @@ public class PluginManager {
            }
        }

-       String endname = repo;
-       if (repo.startsWith("elasticsearch-")) {
-           // remove elasticsearch- prefix
-           endname = repo.substring("elasticsearch-".length());
-       } else if (repo.startsWith("es-")) {
-           // remove es- prefix
-           endname = repo.substring("es-".length());
-       }
-
        if (isOfficialPlugin(repo, user, version)) {
-           return new PluginHandle(endname, Version.CURRENT.number(), null, repo);
+           return new PluginHandle(repo, Version.CURRENT.number(), null);
        }

-       return new PluginHandle(endname, version, user, repo);
+       return new PluginHandle(repo, version, user);
    }

    static boolean isOfficialPlugin(String repo, String user, String version) {
@@ -19,44 +19,33 @@
 
 package org.elasticsearch.repositories;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
 import org.elasticsearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus;
 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
 import org.elasticsearch.repositories.fs.FsRepository;
-import org.elasticsearch.repositories.fs.FsRepositoryModule;
 import org.elasticsearch.repositories.uri.URLRepository;
-import org.elasticsearch.repositories.uri.URLRepositoryModule;
 import org.elasticsearch.snapshots.RestoreService;
-import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.snapshots.SnapshotShardsService;
+import org.elasticsearch.snapshots.SnapshotsService;
 
-import java.util.Map;
-
 /**
- * Module responsible for registering other repositories.
- * <p/>
- * Repositories implemented as plugins should implement {@code onModule(RepositoriesModule module)} method, in which
- * they should register repository using {@link #registerRepository(String, Class)} method.
+ * Sets up classes for Snapshot/Restore.
+ *
+ * Plugins can add custom repository types by calling {@link #registerRepository(String, Class, Class)}.
 */
 public class RepositoriesModule extends AbstractModule {
 
-    private Map<String, Class<? extends Module>> repositoryTypes = Maps.newHashMap();
+    private final RepositoryTypesRegistry repositoryTypes = new RepositoryTypesRegistry();
 
     public RepositoriesModule() {
-        registerRepository(FsRepository.TYPE, FsRepositoryModule.class);
-        registerRepository(URLRepository.TYPE, URLRepositoryModule.class);
+        registerRepository(FsRepository.TYPE, FsRepository.class, BlobStoreIndexShardRepository.class);
+        registerRepository(URLRepository.TYPE, URLRepository.class, BlobStoreIndexShardRepository.class);
     }
 
-    /**
-     * Registers a custom repository type name against a module.
-     *
-     * @param type The type
-     * @param module The module
-     */
-    public void registerRepository(String type, Class<? extends Module> module) {
-        repositoryTypes.put(type, module);
+    /** Registers a custom repository type to the given {@link Repository} and {@link IndexShardRepository}. */
+    public void registerRepository(String type, Class<? extends Repository> repositoryType, Class<? extends IndexShardRepository> shardRepositoryType) {
+        repositoryTypes.registerRepository(type, repositoryType, shardRepositoryType);
    }
 
    @Override

@@ -66,6 +55,6 @@ public class RepositoriesModule extends AbstractModule {
        bind(SnapshotShardsService.class).asEagerSingleton();
        bind(TransportNodesSnapshotsStatus.class).asEagerSingleton();
        bind(RestoreService.class).asEagerSingleton();
-       bind(RepositoryTypesRegistry.class).toInstance(new RepositoryTypesRegistry(ImmutableMap.copyOf(repositoryTypes)));
+       bind(RepositoryTypesRegistry.class).toInstance(repositoryTypes);
    }
 }

@@ -19,7 +19,6 @@
 
 package org.elasticsearch.repositories;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.Modules;

@@ -29,12 +28,10 @@ import org.elasticsearch.common.settings.Settings;
 import java.util.Arrays;
 import java.util.Collections;
 
-import static org.elasticsearch.common.Strings.toCamelCase;
-
 /**
- * This module spawns specific repository module
+ * Binds repository classes for the specific repository type.
 */
-public class RepositoryModule extends AbstractModule implements SpawnModules {
+public class RepositoryModule extends AbstractModule {
 
    private RepositoryName repositoryName;
 

@@ -59,28 +56,12 @@ public class RepositoryModule extends AbstractModule implements SpawnModules {
        this.typesRegistry = typesRegistry;
    }
 
-   /**
-    * Returns repository module.
-    * <p/>
-    * First repository type is looked up in typesRegistry and if it's not found there, this module tries to
-    * load repository by it's class name.
-    *
-    * @return repository module
-    */
-   @Override
-   public Iterable<? extends Module> spawnModules() {
-       Class<? extends Module> repoModuleClass = typesRegistry.type(repositoryName.type());
-       if (repoModuleClass == null) {
-           throw new IllegalArgumentException("Could not find repository type [" + repositoryName.getType() + "] for repository [" + repositoryName.getName() + "]");
-       }
-       return Collections.unmodifiableList(Arrays.asList(Modules.createModule(repoModuleClass, globalSettings)));
-   }
-
    /**
    * {@inheritDoc}
    */
    @Override
    protected void configure() {
+       typesRegistry.bindType(binder(), repositoryName.type());
        bind(RepositorySettings.class).toInstance(new RepositorySettings(globalSettings, settings));
    }
 }

@@ -19,31 +19,34 @@
 
 package org.elasticsearch.repositories;
 
-import com.google.common.collect.ImmutableMap;
-import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.Binder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.ExtensionPoint;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
 
 /**
- * Map of registered repository types and associated with these types modules
+ * A mapping from type name to implementations of {@link Repository} and {@link IndexShardRepository}.
 */
 public class RepositoryTypesRegistry {
-    private final ImmutableMap<String, Class<? extends Module>> repositoryTypes;
+    // invariant: repositories and shardRepositories have the same keyset
+    private final ExtensionPoint.SelectedType<Repository> repositoryTypes =
+        new ExtensionPoint.SelectedType<>("repository", Repository.class);
+    private final ExtensionPoint.SelectedType<IndexShardRepository> shardRepositoryTypes =
+        new ExtensionPoint.SelectedType<>("index_repository", IndexShardRepository.class);
 
-    /**
-     * Creates new repository with given map of types
-     *
-     * @param repositoryTypes
-     */
-    public RepositoryTypesRegistry(ImmutableMap<String, Class<? extends Module>> repositoryTypes) {
-        this.repositoryTypes = repositoryTypes;
+    /** Adds a new repository type to the registry, bound to the given implementation classes. */
+    public void registerRepository(String name, Class<? extends Repository> repositoryType, Class<? extends IndexShardRepository> shardRepositoryType) {
+        repositoryTypes.registerExtension(name, repositoryType);
+        shardRepositoryTypes.registerExtension(name, shardRepositoryType);
    }
 
    /**
-    * Returns repository module class for the given type
-    *
-    * @param type repository type
-    * @return repository module class or null if type is not found
+    * Looks up the given type and binds the implementation into the given binder.
+    * Throws an {@link IllegalArgumentException} if the given type does not exist.
    */
-   public Class<? extends Module> type(String type) {
-       return repositoryTypes.get(type);
+   public void bindType(Binder binder, String type) {
+       Settings settings = Settings.builder().put("type", type).build();
+       repositoryTypes.bindType(binder, settings, "type", null);
+       shardRepositoryTypes.bindType(binder, settings, "type", null);
    }
 }
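Under the reworked registry, a repository plugin registers its `Repository` and `IndexShardRepository` implementations in a single call from its `onModule` hook instead of contributing a Guice module per type. A hedged sketch of such a hook; the class name and the "myfs" type are made up, and it simply reuses the built-in filesystem implementations wired up above:

<pre>
import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.repositories.fs.FsRepository;

// Hypothetical plugin hook; in a real plugin this method would live on the plugin's main class.
public class MyRepositoryPluginHook {

    // Called by the node when it assembles RepositoriesModule.
    public void onModule(RepositoriesModule repositoriesModule) {
        // one call now binds both the repository and its shard-level counterpart
        repositoriesModule.registerRepository("myfs", FsRepository.class, BlobStoreIndexShardRepository.class);
    }
}
</pre>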
@ -56,7 +56,6 @@ import java.util.Map;
|
||||||
public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue {
|
public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue {
|
||||||
|
|
||||||
private final int precision;
|
private final int precision;
|
||||||
private final boolean rehash;
|
|
||||||
private final ValuesSource valuesSource;
|
private final ValuesSource valuesSource;
|
||||||
|
|
||||||
// Expensive to initialize, so we only initialize it when we have an actual value source
|
// Expensive to initialize, so we only initialize it when we have an actual value source
|
||||||
|
@ -66,11 +65,10 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue
|
||||||
private Collector collector;
|
private Collector collector;
|
||||||
private ValueFormatter formatter;
|
private ValueFormatter formatter;
|
||||||
|
|
||||||
public CardinalityAggregator(String name, ValuesSource valuesSource, boolean rehash, int precision, ValueFormatter formatter,
|
public CardinalityAggregator(String name, ValuesSource valuesSource, int precision, ValueFormatter formatter,
|
||||||
AggregationContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
AggregationContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||||
super(name, context, parent, pipelineAggregators, metaData);
|
super(name, context, parent, pipelineAggregators, metaData);
|
||||||
this.valuesSource = valuesSource;
|
this.valuesSource = valuesSource;
|
||||||
this.rehash = rehash;
|
|
||||||
this.precision = precision;
|
this.precision = precision;
|
||||||
this.counts = valuesSource == null ? null : new HyperLogLogPlusPlus(precision, context.bigArrays(), 1);
|
this.counts = valuesSource == null ? null : new HyperLogLogPlusPlus(precision, context.bigArrays(), 1);
|
||||||
this.formatter = formatter;
|
this.formatter = formatter;
|
||||||
|
@ -85,13 +83,6 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue
|
||||||
if (valuesSource == null) {
|
if (valuesSource == null) {
|
||||||
return new EmptyCollector();
|
return new EmptyCollector();
|
||||||
}
|
}
|
||||||
// if rehash is false then the value source is either already hashed, or the user explicitly
|
|
||||||
// requested not to hash the values (perhaps they already hashed the values themselves before indexing the doc)
|
|
||||||
// so we can just work with the original value source as is
|
|
||||||
if (!rehash) {
|
|
||||||
MurmurHash3Values hashValues = MurmurHash3Values.cast(((ValuesSource.Numeric) valuesSource).longValues(ctx));
|
|
||||||
return new DirectCollector(counts, hashValues);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (valuesSource instanceof ValuesSource.Numeric) {
|
if (valuesSource instanceof ValuesSource.Numeric) {
|
||||||
ValuesSource.Numeric source = (ValuesSource.Numeric) valuesSource;
|
ValuesSource.Numeric source = (ValuesSource.Numeric) valuesSource;
|
||||||
|
|
|
@ -19,7 +19,6 @@
|
||||||
|
|
||||||
package org.elasticsearch.search.aggregations.metrics.cardinality;
|
package org.elasticsearch.search.aggregations.metrics.cardinality;
|
||||||
|
|
||||||
import org.elasticsearch.search.aggregations.AggregationExecutionException;
|
|
||||||
import org.elasticsearch.search.aggregations.Aggregator;
|
import org.elasticsearch.search.aggregations.Aggregator;
|
||||||
import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
|
import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
|
||||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||||
|
@ -35,12 +34,10 @@ import java.util.Map;
|
||||||
final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> {
|
final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> {
|
||||||
|
|
||||||
private final long precisionThreshold;
|
private final long precisionThreshold;
|
||||||
private final boolean rehash;
|
|
||||||
|
|
||||||
CardinalityAggregatorFactory(String name, ValuesSourceConfig config, long precisionThreshold, boolean rehash) {
|
CardinalityAggregatorFactory(String name, ValuesSourceConfig config, long precisionThreshold) {
|
||||||
super(name, InternalCardinality.TYPE.name(), config);
|
super(name, InternalCardinality.TYPE.name(), config);
|
||||||
this.precisionThreshold = precisionThreshold;
|
this.precisionThreshold = precisionThreshold;
|
||||||
this.rehash = rehash;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private int precision(Aggregator parent) {
|
private int precision(Aggregator parent) {
|
||||||
|
@ -50,16 +47,13 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory<V
|
||||||
@Override
|
@Override
|
||||||
protected Aggregator createUnmapped(AggregationContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
|
protected Aggregator createUnmapped(AggregationContext context, Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
return new CardinalityAggregator(name, null, true, precision(parent), config.formatter(), context, parent, pipelineAggregators, metaData);
|
return new CardinalityAggregator(name, null, precision(parent), config.formatter(), context, parent, pipelineAggregators, metaData);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent,
|
protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent,
|
||||||
boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
|
||||||
if (!(valuesSource instanceof ValuesSource.Numeric) && !rehash) {
|
return new CardinalityAggregator(name, valuesSource, precision(parent), config.formatter(), context, parent, pipelineAggregators,
|
||||||
throw new AggregationExecutionException("Turning off rehashing for cardinality aggregation [" + name + "] on non-numeric values in not allowed");
|
|
||||||
}
|
|
||||||
return new CardinalityAggregator(name, valuesSource, rehash, precision(parent), config.formatter(), context, parent, pipelineAggregators,
|
|
||||||
metaData);
|
metaData);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -21,11 +21,9 @@ package org.elasticsearch.search.aggregations.metrics.cardinality;
|
||||||
|
|
||||||
import org.elasticsearch.common.ParseField;
|
import org.elasticsearch.common.ParseField;
|
||||||
import org.elasticsearch.common.xcontent.XContentParser;
|
import org.elasticsearch.common.xcontent.XContentParser;
|
||||||
import org.elasticsearch.index.mapper.core.Murmur3FieldMapper;
|
|
||||||
import org.elasticsearch.search.SearchParseException;
|
import org.elasticsearch.search.SearchParseException;
|
||||||
import org.elasticsearch.search.aggregations.Aggregator;
|
import org.elasticsearch.search.aggregations.Aggregator;
|
||||||
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
import org.elasticsearch.search.aggregations.AggregatorFactory;
|
||||||
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
|
||||||
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
|
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
|
||||||
import org.elasticsearch.search.internal.SearchContext;
|
import org.elasticsearch.search.internal.SearchContext;
|
||||||
|
|
||||||
|
@ -35,6 +33,7 @@ import java.io.IOException;
|
||||||
public class CardinalityParser implements Aggregator.Parser {
|
public class CardinalityParser implements Aggregator.Parser {
|
||||||
|
|
||||||
private static final ParseField PRECISION_THRESHOLD = new ParseField("precision_threshold");
|
private static final ParseField PRECISION_THRESHOLD = new ParseField("precision_threshold");
|
||||||
|
private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed");
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String type() {
|
public String type() {
|
||||||
|
@ -44,10 +43,9 @@ public class CardinalityParser implements Aggregator.Parser {
|
||||||
@Override
|
@Override
|
||||||
public AggregatorFactory parse(String name, XContentParser parser, SearchContext context) throws IOException {
|
public AggregatorFactory parse(String name, XContentParser parser, SearchContext context) throws IOException {
|
||||||
|
|
||||||
ValuesSourceParser vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
|
ValuesSourceParser<?> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
|
||||||
|
|
||||||
long precisionThreshold = -1;
|
long precisionThreshold = -1;
|
||||||
Boolean rehash = null;
|
|
||||||
|
|
||||||
XContentParser.Token token;
|
XContentParser.Token token;
|
||||||
String currentFieldName = null;
|
String currentFieldName = null;
|
||||||
|
@ -57,8 +55,8 @@ public class CardinalityParser implements Aggregator.Parser {
|
||||||
} else if (vsParser.token(currentFieldName, token, parser)) {
|
} else if (vsParser.token(currentFieldName, token, parser)) {
|
||||||
continue;
|
continue;
|
||||||
} else if (token.isValue()) {
|
} else if (token.isValue()) {
|
||||||
if ("rehash".equals(currentFieldName)) {
|
if (context.parseFieldMatcher().match(currentFieldName, REHASH)) {
|
||||||
rehash = parser.booleanValue();
|
// ignore
|
||||||
} else if (context.parseFieldMatcher().match(currentFieldName, PRECISION_THRESHOLD)) {
|
} else if (context.parseFieldMatcher().match(currentFieldName, PRECISION_THRESHOLD)) {
|
||||||
precisionThreshold = parser.longValue();
|
precisionThreshold = parser.longValue();
|
||||||
} else {
|
} else {
|
||||||
|
@ -70,15 +68,7 @@ public class CardinalityParser implements Aggregator.Parser {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ValuesSourceConfig<?> config = vsParser.config();
|
return new CardinalityAggregatorFactory(name, vsParser.config(), precisionThreshold);
|
||||||
|
|
||||||
if (rehash == null && config.fieldContext() != null && config.fieldContext().fieldType() instanceof Murmur3FieldMapper.Murmur3FieldType) {
|
|
||||||
rehash = false;
|
|
||||||
} else if (rehash == null) {
|
|
||||||
rehash = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
return new CardinalityAggregatorFactory(name, config, precisionThreshold, rehash);
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
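Since values are now always rehashed, a `cardinality` request no longer needs a `rehash` flag even on a murmur3-mapped field; only the target field and, optionally, `precision_threshold` matter. A sketch under the assumption that the existing `CardinalityBuilder` helper exposes `field` and `precisionThreshold` setters matching the options parsed above; the aggregation and field names are placeholders:

<pre>
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityBuilder;

public class CardinalityRequestExample {
    public static CardinalityBuilder distinctUsers() {
        // no rehash(...) flag: the parser above accepts it only as a deprecated no-op
        return AggregationBuilders.cardinality("distinct_users")
                .field("user")
                .precisionThreshold(1000);
    }
}
</pre>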
@@ -47,6 +47,8 @@ public class GeoDistanceSortBuilder extends SortBuilder {
     private String sortMode;
     private QueryBuilder nestedFilter;
     private String nestedPath;
+    private Boolean coerce;
+    private Boolean ignoreMalformed;
 
     /**
      * Constructs a new distance based sort on a geo point like field.

@@ -146,6 +148,16 @@ public class GeoDistanceSortBuilder extends SortBuilder {
         return this;
     }
 
+    public GeoDistanceSortBuilder coerce(boolean coerce) {
+        this.coerce = coerce;
+        return this;
+    }
+
+    public GeoDistanceSortBuilder ignoreMalformed(boolean ignoreMalformed) {
+        this.ignoreMalformed = ignoreMalformed;
+        return this;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject("_geo_distance");

@@ -181,6 +193,12 @@ public class GeoDistanceSortBuilder extends SortBuilder {
         if (nestedFilter != null) {
             builder.field("nested_filter", nestedFilter, params);
         }
+        if (coerce != null) {
+            builder.field("coerce", coerce);
+        }
+        if (ignoreMalformed != null) {
+            builder.field("ignore_malformed", ignoreMalformed);
+        }
 
         builder.endObject();
         return builder;
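The `_geo_distance` sort gets the same `coerce`/`ignore_malformed` pair in place of `normalize`. A minimal sketch combining the builder methods added above with the existing `SortBuilders` helper; the field name and coordinates are placeholders:

<pre>
import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;

public class GeoDistanceSortExample {
    public static GeoDistanceSortBuilder nearestFirst() {
        // sort hits by distance from a fixed point; out-of-range points are coerced rather than rejected
        return SortBuilders.geoDistanceSort("location")
                .point(40.7143528, -74.0059731)
                .coerce(true);
    }
}
</pre>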
@@ -29,6 +29,7 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.util.BitSet;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance;
 import org.elasticsearch.common.geo.GeoPoint;

@@ -42,7 +43,6 @@ import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
 import org.elasticsearch.index.fielddata.MultiGeoPointValues;
 import org.elasticsearch.index.fielddata.NumericDoubleValues;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
-import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;

@@ -73,8 +73,9 @@ public class GeoDistanceSortParser implements SortParser {
         MultiValueMode sortMode = null;
         NestedInnerQueryParseSupport nestedHelper = null;
 
-        boolean normalizeLon = true;
-        boolean normalizeLat = true;
+        final boolean indexCreatedBeforeV2_0 = context.queryParserService().getIndexCreatedVersion().before(Version.V_2_0_0);
+        boolean coerce = false;
+        boolean ignoreMalformed = false;
 
         XContentParser.Token token;
         String currentName = parser.currentName();

@@ -107,9 +108,13 @@ public class GeoDistanceSortParser implements SortParser {
                     unit = DistanceUnit.fromString(parser.text());
                 } else if (currentName.equals("distance_type") || currentName.equals("distanceType")) {
                     geoDistance = GeoDistance.fromString(parser.text());
-                } else if ("normalize".equals(currentName)) {
-                    normalizeLat = parser.booleanValue();
-                    normalizeLon = parser.booleanValue();
+                } else if ("coerce".equals(currentName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentName))) {
+                    coerce = parser.booleanValue();
+                    if (coerce == true) {
+                        ignoreMalformed = true;
+                    }
+                } else if ("ignore_malformed".equals(currentName) && coerce == false) {
+                    ignoreMalformed = parser.booleanValue();
                 } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) {
                     sortMode = MultiValueMode.fromString(parser.text());
                 } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) {

@@ -126,9 +131,21 @@ public class GeoDistanceSortParser implements SortParser {
             }
         }
 
-        if (normalizeLat || normalizeLon) {
+        // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
+        if (!indexCreatedBeforeV2_0 && !ignoreMalformed) {
             for (GeoPoint point : geoPoints) {
-                GeoUtils.normalizePoint(point, normalizeLat, normalizeLon);
+                if (point.lat() > 90.0 || point.lat() < -90.0) {
+                    throw new ElasticsearchParseException("illegal latitude value [{}] for [GeoDistanceSort]", point.lat());
+                }
+                if (point.lon() > 180.0 || point.lon() < -180) {
+                    throw new ElasticsearchParseException("illegal longitude value [{}] for [GeoDistanceSort]", point.lon());
+                }
+            }
+        }
+
+        if (coerce) {
+            for (GeoPoint point : geoPoints) {
+                GeoUtils.normalizePoint(point, coerce, coerce);
             }
         }
 
@@ -146,8 +146,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
     // node id to actual channel
     protected final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
     protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
-    protected final Map<String, Channel> serverChannels = newConcurrentMap();
-    protected final Map<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
+    protected final Map<String, List<Channel>> serverChannels = newConcurrentMap();
+    protected final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
     protected volatile TransportServiceAdapter transportServiceAdapter;
     protected volatile BoundTransportAddress boundAddress;
     protected final KeyedLock<String> connectionLock = new KeyedLock<>();

@@ -286,7 +286,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             bindServerBootstrap(name, mergedSettings);
         }
 
-        InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(DEFAULT_PROFILE).getLocalAddress();
+        InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(DEFAULT_PROFILE).get(0).getLocalAddress();
         int publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", boundAddress.getPort()));
         String publishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
         InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);

@@ -397,23 +397,38 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
 
     private void bindServerBootstrap(final String name, final Settings settings) {
         // Bind and start to accept incoming connections.
-        InetAddress hostAddressX;
+        InetAddress hostAddresses[];
         String bindHost = settings.get("bind_host");
         try {
-            hostAddressX = networkService.resolveBindHostAddress(bindHost);
+            hostAddresses = networkService.resolveBindHostAddress(bindHost);
         } catch (IOException e) {
             throw new BindTransportException("Failed to resolve host [" + bindHost + "]", e);
         }
-        final InetAddress hostAddress = hostAddressX;
+        for (InetAddress hostAddress : hostAddresses) {
+            bindServerBootstrap(name, hostAddress, settings);
+        }
+    }
+
+    private void bindServerBootstrap(final String name, final InetAddress hostAddress, Settings settings) {
+
         String port = settings.get("port");
         PortsRange portsRange = new PortsRange(port);
         final AtomicReference<Exception> lastException = new AtomicReference<>();
+        final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
         boolean success = portsRange.iterate(new PortsRange.PortCallback() {
             @Override
             public boolean onPortNumber(int portNumber) {
                 try {
-                    serverChannels.put(name, serverBootstraps.get(name).bind(new InetSocketAddress(hostAddress, portNumber)));
+                    Channel channel = serverBootstraps.get(name).bind(new InetSocketAddress(hostAddress, portNumber));
+                    synchronized (serverChannels) {
+                        List<Channel> list = serverChannels.get(name);
+                        if (list == null) {
+                            list = new ArrayList<>();
+                            serverChannels.put(name, list);
+                        }
+                        list.add(channel);
+                        boundSocket.set(channel.getLocalAddress());
+                    }
                 } catch (Exception e) {
                     lastException.set(e);
                     return false;

@@ -426,14 +441,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
         }
 
         if (!DEFAULT_PROFILE.equals(name)) {
-            InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(name).getLocalAddress();
+            InetSocketAddress boundAddress = (InetSocketAddress) boundSocket.get();
             int publishPort = settings.getAsInt("publish_port", boundAddress.getPort());
             String publishHost = settings.get("publish_host", boundAddress.getHostString());
             InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
-            profileBoundAddresses.put(name, new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)));
+            // TODO: support real multihoming with publishing. Today we use putIfAbsent so only the prioritized address is published
+            profileBoundAddresses.putIfAbsent(name, new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)));
         }
 
-        logger.debug("Bound profile [{}] to address [{}]", name, serverChannels.get(name).getLocalAddress());
+        logger.info("Bound profile [{}] to address [{}]", name, boundSocket.get());
     }
 
     private void createServerBootstrap(String name, Settings settings) {

@@ -500,15 +516,17 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             nodeChannels.close();
         }
 
-        Iterator<Map.Entry<String, Channel>> serverChannelIterator = serverChannels.entrySet().iterator();
+        Iterator<Map.Entry<String, List<Channel>>> serverChannelIterator = serverChannels.entrySet().iterator();
         while (serverChannelIterator.hasNext()) {
-            Map.Entry<String, Channel> serverChannelEntry = serverChannelIterator.next();
+            Map.Entry<String, List<Channel>> serverChannelEntry = serverChannelIterator.next();
             String name = serverChannelEntry.getKey();
-            Channel serverChannel = serverChannelEntry.getValue();
-            try {
-                serverChannel.close().awaitUninterruptibly();
-            } catch (Throwable t) {
-                logger.debug("Error closing serverChannel for profile [{}]", t, name);
+            List<Channel> serverChannels = serverChannelEntry.getValue();
+            for (Channel serverChannel : serverChannels) {
+                try {
+                    serverChannel.close().awaitUninterruptibly();
+                } catch (Throwable t) {
+                    logger.debug("Error closing serverChannel for profile [{}]", t, name);
+                }
             }
             serverChannelIterator.remove();
         }
@@ -22,7 +22,7 @@ DESCRIPTION
 
 EXAMPLES
 
-    plugin install elasticsearch-analysis-kuromoji
+    plugin install analysis-kuromoji
 
     plugin install elasticsearch/shield/latest
 

@@ -32,23 +32,24 @@ OFFICIAL PLUGINS
 
 The following plugins are officially supported and can be installed by just referring to their name
 
-  - elasticsearch-analysis-icu
-  - elasticsearch-analysis-kuromoji
-  - elasticsearch-analysis-phonetic
-  - elasticsearch-analysis-smartcn
-  - elasticsearch-analysis-stempel
-  - elasticsearch-cloud-aws
-  - elasticsearch-cloud-azure
-  - elasticsearch-cloud-gce
-  - elasticsearch-delete-by-query
-  - elasticsearch-lang-javascript
-  - elasticsearch-lang-python
-  - elasticsearch-mapper-size
+  - analysis-icu
+  - analysis-kuromoji
+  - analysis-phonetic
+  - analysis-smartcn
+  - analysis-stempel
+  - cloud-aws
+  - cloud-azure
+  - cloud-gce
+  - delete-by-query
+  - lang-javascript
+  - lang-python
+  - mapper-murmur3
+  - mapper-size
 
 
 OPTIONS
 
-    -u,--url URL to retrive the plugin from
+    -u,--url URL to retrieve the plugin from
 
     -t,--timeout Timeout until the plugin download is abort
 
|
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.*;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.junit.Test;

@@ -167,7 +168,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
 usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes());
 paths[0] = path;
 FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), paths);
-return new NodeStats(new DiscoveryNode(nodeName, null, Version.V_2_0_0_beta1),
+return new NodeStats(new DiscoveryNode(nodeName, DummyTransportAddress.INSTANCE, Version.CURRENT),
 System.currentTimeMillis(),
 null, null, null, null, null,
 fsInfo,
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.network;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.net.InetAddress;
+
+/**
+ * Tests for network utils. Please avoid using any methods that cause DNS lookups!
+ */
+public class NetworkUtilsTests extends ESTestCase {
+
+    /**
+     * test sort key order respects PREFER_IPV4
+     */
+    public void testSortKey() throws Exception {
+        InetAddress localhostv4 = InetAddress.getByName("127.0.0.1");
+        InetAddress localhostv6 = InetAddress.getByName("::1");
+        assertTrue(NetworkUtils.sortKey(localhostv4, false) < NetworkUtils.sortKey(localhostv6, false));
+        assertTrue(NetworkUtils.sortKey(localhostv6, true) < NetworkUtils.sortKey(localhostv4, true));
+    }
+
+    /**
+     * test ordinary addresses sort before private addresses
+     */
+    public void testSortKeySiteLocal() throws Exception {
+        InetAddress siteLocal = InetAddress.getByName("172.16.0.1");
+        assert siteLocal.isSiteLocalAddress();
+        InetAddress ordinary = InetAddress.getByName("192.192.192.192");
+        assertTrue(NetworkUtils.sortKey(ordinary, true) < NetworkUtils.sortKey(siteLocal, true));
+        assertTrue(NetworkUtils.sortKey(ordinary, false) < NetworkUtils.sortKey(siteLocal, false));
+
+        InetAddress siteLocal6 = InetAddress.getByName("fec0::1");
+        assert siteLocal6.isSiteLocalAddress();
+        InetAddress ordinary6 = InetAddress.getByName("fddd::1");
+        assertTrue(NetworkUtils.sortKey(ordinary6, true) < NetworkUtils.sortKey(siteLocal6, true));
+        assertTrue(NetworkUtils.sortKey(ordinary6, false) < NetworkUtils.sortKey(siteLocal6, false));
+    }
+
+    /**
+     * test private addresses sort before link local addresses
+     */
+    public void testSortKeyLinkLocal() throws Exception {
+        InetAddress linkLocal = InetAddress.getByName("fe80::1");
+        assert linkLocal.isLinkLocalAddress();
+        InetAddress ordinary = InetAddress.getByName("fddd::1");
+        assertTrue(NetworkUtils.sortKey(ordinary, true) < NetworkUtils.sortKey(linkLocal, true));
+        assertTrue(NetworkUtils.sortKey(ordinary, false) < NetworkUtils.sortKey(linkLocal, false));
+    }
+
+    /**
+     * Test filtering out ipv4/ipv6 addresses
+     */
+    public void testFilter() throws Exception {
+        InetAddress addresses[] = { InetAddress.getByName("::1"), InetAddress.getByName("127.0.0.1") };
+        assertArrayEquals(new InetAddress[] { InetAddress.getByName("127.0.0.1") }, NetworkUtils.filterIPV4(addresses));
+        assertArrayEquals(new InetAddress[] { InetAddress.getByName("::1") }, NetworkUtils.filterIPV6(addresses));
+    }
+}
@@ -1116,7 +1116,7 @@ public class GetActionIT extends ESIntegTestCase {
 @Test
 public void testGeneratedNumberFieldsUnstored() throws IOException {
 indexSingleDocumentWithNumericFieldsGeneratedFromText(false, randomBoolean());
-String[] fieldsList = {"token_count", "text.token_count", "murmur", "text.murmur"};
+String[] fieldsList = {"token_count", "text.token_count"};
 // before refresh - document is only in translog
 assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
 refresh();

@@ -1130,7 +1130,7 @@ public class GetActionIT extends ESIntegTestCase {
 @Test
 public void testGeneratedNumberFieldsStored() throws IOException {
 indexSingleDocumentWithNumericFieldsGeneratedFromText(true, randomBoolean());
-String[] fieldsList = {"token_count", "text.token_count", "murmur", "text.murmur"};
+String[] fieldsList = {"token_count", "text.token_count"};
 // before refresh - document is only in translog
 assertGetFieldsNull(indexOrAlias(), "doc", "1", fieldsList);
 assertGetFieldsException(indexOrAlias(), "doc", "1", fieldsList);

@@ -1159,10 +1159,6 @@ public class GetActionIT extends ESIntegTestCase {
 " \"analyzer\": \"standard\",\n" +
 " \"store\": \"" + storedString + "\"" +
 " },\n" +
-" \"murmur\": {\n" +
-" \"type\": \"murmur3\",\n" +
-" \"store\": \"" + storedString + "\"" +
-" },\n" +
 " \"text\": {\n" +
 " \"type\": \"string\",\n" +
 " \"fields\": {\n" +

@@ -1170,10 +1166,6 @@ public class GetActionIT extends ESIntegTestCase {
 " \"type\": \"token_count\",\n" +
 " \"analyzer\": \"standard\",\n" +
 " \"store\": \"" + storedString + "\"" +
-" },\n" +
-" \"murmur\": {\n" +
-" \"type\": \"murmur3\",\n" +
-" \"store\": \"" + storedString + "\"" +
 " }\n" +
 " }\n" +
 " }" +

@@ -1185,7 +1177,6 @@ public class GetActionIT extends ESIntegTestCase {
 assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
 ensureGreen();
 String doc = "{\n" +
-" \"murmur\": \"Some value that can be hashed\",\n" +
 " \"token_count\": \"A text with five words.\",\n" +
 " \"text\": \"A text with five words.\"\n" +
 "}\n";
@@ -535,7 +535,7 @@ public class InternalEngineTests extends ESTestCase {
 }

 @Override
-public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
+public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
 counter.incrementAndGet();
 return searcher;
 }
@@ -18,7 +18,10 @@
 */
 package org.elasticsearch.index.mapper.geo;

+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;

@@ -26,6 +29,7 @@ import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.VersionUtils;
 import org.junit.Test;

 import java.util.ArrayList;

@@ -138,7 +142,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
 public void testNormalizeLatLonValuesDefault() throws Exception {
 // default to normalize
 String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-.startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
+.startObject("properties").startObject("point").field("type", "geo_point").field("coerce", true)
+.field("ignore_malformed", true).endObject().endObject()
 .endObject().endObject().string();

 DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);

@@ -171,7 +176,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
 @Test
 public void testValidateLatLonValues() throws Exception {
 String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", true).endObject().endObject()
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("coerce", false)
+.field("ignore_malformed", false).endObject().endObject()
 .endObject().endObject().string();

 DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);

@@ -231,7 +237,8 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
 @Test
 public void testNoValidateLatLonValues() throws Exception {
 String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", false).endObject().endObject()
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("coerce", false)
+.field("ignore_malformed", true).endObject().endObject()
 .endObject().endObject().string();

 DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);

@@ -472,30 +479,161 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
 assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
 }

+/**
+ * Test that expected exceptions are thrown when creating a new index with deprecated options
+ */
+@Test
+public void testOptionDeprecation() throws Exception {
+DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+// test deprecation exceptions on newly created indexes
+try {
+String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(validateMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate : true]");
+}
+
+try {
+String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate_lat", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(validateMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lat : true]");
+}
+
+try {
+String validateMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate_lon", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(validateMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [validate_lon : true]");
+}
+
+// test deprecated normalize
+try {
+String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(normalizeMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize : true]");
+}
+
+try {
+String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize_lat", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(normalizeMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lat : true]");
+}
+
+try {
+String normalizeMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize_lon", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(normalizeMapping);
+fail("process completed successfully when " + MapperParsingException.class.getName() + " expected");
+} catch (MapperParsingException e) {
+assertEquals(e.getMessage(), "Mapping definition for [point] has unsupported parameters: [normalize_lon : true]");
+}
+}
+
+/**
+ * Test backward compatibility
+ */
+@Test
+public void testBackwardCompatibleOptions() throws Exception {
+// backward compatibility testing
+Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_1_0_0,
+Version.V_1_7_1)).build();
+
+// validate
+DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser();
+String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate", false).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"ignore_malformed\":true"));
+
+mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate_lat", false).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"ignore_malformed\":true"));
+
+mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("validate_lon", false).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"ignore_malformed\":true"));
+
+// normalize
+mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"coerce\":true"));
+
+mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize_lat", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"coerce\":true"));
+
+mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+.field("normalize_lon", true).endObject().endObject()
+.endObject().endObject().string();
+parser.parse(mapping);
+assertThat(parser.parse(mapping).mapping().toString(), containsString("\"coerce\":true"));
+}
+
 @Test
 public void testGeoPointMapperMerge() throws Exception {
 String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
 .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
-.field("validate", true).endObject().endObject()
+.field("ignore_malformed", true).endObject().endObject()
 .endObject().endObject().string();
 DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
 DocumentMapper stage1 = parser.parse(stage1Mapping);
 String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
-.field("validate", false).endObject().endObject()
+.startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).field("geohash", true)
+.field("ignore_malformed", false).endObject().endObject()
 .endObject().endObject().string();
 DocumentMapper stage2 = parser.parse(stage2Mapping);

 MergeResult mergeResult = stage1.merge(stage2.mapping(), false, false);
 assertThat(mergeResult.hasConflicts(), equalTo(true));
-assertThat(mergeResult.buildConflicts().length, equalTo(2));
+assertThat(mergeResult.buildConflicts().length, equalTo(1));
 // todo better way of checking conflict?
-assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));
+assertThat("mapper [point] has different lat_lon", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));

 // correct mapping and ensure no failures
 stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
 .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
-.field("validate", true).field("normalize", true).endObject().endObject()
+.field("ignore_malformed", true).endObject().endObject()
 .endObject().endObject().string();
 stage2 = parser.parse(stage2Mapping);
 mergeResult = stage1.merge(stage2.mapping(), false, false);
@@ -31,7 +31,7 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase {

 @Override
 protected int numProperties() {
-return 6 + super.numProperties();
+return 4 + super.numProperties();
 }

 @Override

@@ -40,11 +40,9 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase {
 switch (propNum) {
 case 0: gft.setGeohashEnabled(new StringFieldMapper.StringFieldType(), 1, true); break;
 case 1: gft.setLatLonEnabled(new DoubleFieldMapper.DoubleFieldType(), new DoubleFieldMapper.DoubleFieldType()); break;
-case 2: gft.setValidateLon(!gft.validateLon()); break;
-case 3: gft.setValidateLat(!gft.validateLat()); break;
-case 4: gft.setNormalizeLon(!gft.normalizeLon()); break;
-case 5: gft.setNormalizeLat(!gft.normalizeLat()); break;
-default: super.modifyProperty(ft, propNum - 6);
+case 2: gft.setIgnoreMalformed(!gft.ignoreMalformed()); break;
+case 3: gft.setCoerce(!gft.coerce()); break;
+default: super.modifyProperty(ft, propNum - 4);
 }
 }
 }
@@ -339,34 +339,28 @@ public class GeoUtilsTests extends ESTestCase {
 @Test
 public void testNormalizePoint_outsideNormalRange_withOptions() {
 for (int i = 0; i < 100; i++) {
-boolean normLat = randomBoolean();
-boolean normLon = randomBoolean();
+boolean normalize = randomBoolean();
 double normalisedLat = (randomDouble() * 180.0) - 90.0;
 double normalisedLon = (randomDouble() * 360.0) - 180.0;
-int shiftLat = randomIntBetween(1, 10000);
-int shiftLon = randomIntBetween(1, 10000);
-double testLat = normalisedLat + (180.0 * shiftLat);
-double testLon = normalisedLon + (360.0 * shiftLon);
+int shift = randomIntBetween(1, 10000);
+double testLat = normalisedLat + (180.0 * shift);
+double testLon = normalisedLon + (360.0 * shift);

 double expectedLat;
 double expectedLon;
-if (normLat) {
-expectedLat = normalisedLat * (shiftLat % 2 == 0 ? 1 : -1);
-} else {
-expectedLat = testLat;
-}
-if (normLon) {
-expectedLon = normalisedLon + ((normLat && shiftLat % 2 == 1) ? 180 : 0);
+if (normalize) {
+expectedLat = normalisedLat * (shift % 2 == 0 ? 1 : -1);
+expectedLon = normalisedLon + ((shift % 2 == 1) ? 180 : 0);
 if (expectedLon > 180.0) {
 expectedLon -= 360;
 }
 } else {
-double shiftValue = normalisedLon > 0 ? -180 : 180;
-expectedLon = testLon + ((normLat && shiftLat % 2 == 1) ? shiftValue : 0);
+expectedLat = testLat;
+expectedLon = testLon;
 }
 GeoPoint testPoint = new GeoPoint(testLat, testLon);
 GeoPoint expectedPoint = new GeoPoint(expectedLat, expectedLon);
-GeoUtils.normalizePoint(testPoint, normLat, normLon);
+GeoUtils.normalizePoint(testPoint, normalize, normalize);
 assertThat("Unexpected Latitude", testPoint.lat(), closeTo(expectedPoint.lat(), MAX_ACCEPTABLE_ERROR));
 assertThat("Unexpected Longitude", testPoint.lon(), closeTo(expectedPoint.lon(), MAX_ACCEPTABLE_ERROR));
 }
@@ -344,7 +344,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
 .addAlias(new Alias("templated_alias-{index}"))
 .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}"))
 .addAlias(new Alias("complex_filtered_alias")
-.filter(QueryBuilders.termsQuery("_type", "typeX", "typeY", "typeZ").execution("bool")))
+.filter(QueryBuilders.termsQuery("_type", "typeX", "typeY", "typeZ")))
 .get();

 assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ"));
@@ -52,9 +52,7 @@ import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLSocketFactory;
 import java.io.BufferedWriter;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.FileVisitResult;

@@ -539,17 +537,19 @@ public class PluginManagerIT extends ESIntegTestCase {

 @Test
 public void testOfficialPluginName_ThrowsException() throws IOException {
-PluginManager.checkForOfficialPlugins("elasticsearch-analysis-icu");
-PluginManager.checkForOfficialPlugins("elasticsearch-analysis-kuromoji");
-PluginManager.checkForOfficialPlugins("elasticsearch-analysis-phonetic");
-PluginManager.checkForOfficialPlugins("elasticsearch-analysis-smartcn");
-PluginManager.checkForOfficialPlugins("elasticsearch-analysis-stempel");
-PluginManager.checkForOfficialPlugins("elasticsearch-cloud-aws");
-PluginManager.checkForOfficialPlugins("elasticsearch-cloud-azure");
-PluginManager.checkForOfficialPlugins("elasticsearch-cloud-gce");
-PluginManager.checkForOfficialPlugins("elasticsearch-delete-by-query");
-PluginManager.checkForOfficialPlugins("elasticsearch-lang-javascript");
-PluginManager.checkForOfficialPlugins("elasticsearch-lang-python");
+PluginManager.checkForOfficialPlugins("analysis-icu");
+PluginManager.checkForOfficialPlugins("analysis-kuromoji");
+PluginManager.checkForOfficialPlugins("analysis-phonetic");
+PluginManager.checkForOfficialPlugins("analysis-smartcn");
+PluginManager.checkForOfficialPlugins("analysis-stempel");
+PluginManager.checkForOfficialPlugins("cloud-aws");
+PluginManager.checkForOfficialPlugins("cloud-azure");
+PluginManager.checkForOfficialPlugins("cloud-gce");
+PluginManager.checkForOfficialPlugins("delete-by-query");
+PluginManager.checkForOfficialPlugins("lang-javascript");
+PluginManager.checkForOfficialPlugins("lang-python");
+PluginManager.checkForOfficialPlugins("mapper-murmur3");
+PluginManager.checkForOfficialPlugins("mapper-size");

 try {
 PluginManager.checkForOfficialPlugins("elasticsearch-mapper-attachment");
@@ -62,7 +62,7 @@ public class PluginManagerUnitTests extends ESTestCase {
 .build();
 Environment environment = new Environment(settings);

-PluginManager.PluginHandle pluginHandle = new PluginManager.PluginHandle(pluginName, "version", "user", "repo");
+PluginManager.PluginHandle pluginHandle = new PluginManager.PluginHandle(pluginName, "version", "user");
 String configDirPath = Files.simplifyPath(pluginHandle.configDir(environment).normalize().toString());
 String expectedDirPath = Files.simplifyPath(genericConfigFolder.resolve(pluginName).normalize().toString());

@@ -82,12 +82,12 @@ public class PluginManagerUnitTests extends ESTestCase {
 Iterator<URL> iterator = handle.urls().iterator();

 if (supportStagingUrls) {
-String expectedStagingURL = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/elasticsearch-%s-%s/org/elasticsearch/plugin/elasticsearch-%s/%s/elasticsearch-%s-%s.zip",
+String expectedStagingURL = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
 Version.CURRENT.number(), Build.CURRENT.hashShort(), pluginName, Version.CURRENT.number(), pluginName, Version.CURRENT.number());
 assertThat(iterator.next().toExternalForm(), is(expectedStagingURL));
 }

-URL expected = new URL("http", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/elasticsearch-" + pluginName + "/" + Version.CURRENT.number() + "/elasticsearch-" +
+URL expected = new URL("http", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/" + pluginName + "/" + Version.CURRENT.number() + "/" +
 pluginName + "-" + Version.CURRENT.number() + ".zip");
 assertThat(iterator.next().toExternalForm(), is(expected.toExternalForm()));

@@ -95,10 +95,10 @@ public class PluginManagerUnitTests extends ESTestCase {
 }

 @Test
-public void testTrimmingElasticsearchFromOfficialPluginName() throws IOException {
-String randomPluginName = randomFrom(PluginManager.OFFICIAL_PLUGINS.asList()).replaceFirst("elasticsearch-", "");
+public void testOfficialPluginName() throws IOException {
+String randomPluginName = randomFrom(PluginManager.OFFICIAL_PLUGINS.asList());
 PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(randomPluginName);
-assertThat(handle.name, is(randomPluginName.replaceAll("^elasticsearch-", "")));
+assertThat(handle.name, is(randomPluginName));

 boolean supportStagingUrls = randomBoolean();
 if (supportStagingUrls) {

@@ -108,12 +108,12 @@ public class PluginManagerUnitTests extends ESTestCase {
 Iterator<URL> iterator = handle.urls().iterator();

 if (supportStagingUrls) {
-String expectedStagingUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/elasticsearch-%s-%s/org/elasticsearch/plugin/elasticsearch-%s/%s/elasticsearch-%s-%s.zip",
+String expectedStagingUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
 Version.CURRENT.number(), Build.CURRENT.hashShort(), randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number());
 assertThat(iterator.next().toExternalForm(), is(expectedStagingUrl));
 }

-String releaseUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/elasticsearch-%s/%s/elasticsearch-%s-%s.zip",
+String releaseUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
 randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number());
 assertThat(iterator.next().toExternalForm(), is(releaseUrl));

@@ -121,12 +121,11 @@ public class PluginManagerUnitTests extends ESTestCase {
 }

 @Test
-public void testTrimmingElasticsearchFromGithubPluginName() throws IOException {
+public void testGithubPluginName() throws IOException {
 String user = randomAsciiOfLength(6);
-String randomName = randomAsciiOfLength(10);
-String pluginName = randomFrom("elasticsearch-", "es-") + randomName;
+String pluginName = randomAsciiOfLength(10);
 PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(user + "/" + pluginName);
-assertThat(handle.name, is(randomName));
+assertThat(handle.name, is(pluginName));
 assertThat(handle.urls(), hasSize(1));
 assertThat(handle.urls().get(0).toExternalForm(), is(new URL("https", "github.com", "/" + user + "/" + pluginName + "/" + "archive/master.zip").toExternalForm()));
 }
@@ -61,54 +61,23 @@ public class CardinalityIT extends ESIntegTestCase {
 jsonBuilder().startObject().startObject("type").startObject("properties")
 .startObject("str_value")
 .field("type", "string")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
-.endObject()
-.endObject()
 .endObject()
 .startObject("str_values")
 .field("type", "string")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
-.endObject()
-.endObject()
 .endObject()
 .startObject("l_value")
 .field("type", "long")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
-.endObject()
-.endObject()
 .endObject()
 .startObject("l_values")
 .field("type", "long")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
-.endObject()
-.endObject()
 .endObject()
 .startObject("d_value")
 .field("type", "double")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
+.endObject()
+.startObject("d_values")
+.field("type", "double")
 .endObject()
-.endObject()
-.endObject()
-.startObject("d_values")
-.field("type", "double")
-.startObject("fields")
-.startObject("hash")
-.field("type", "murmur3")
-.endObject()
-.endObject()
-.endObject()
-.endObject()
-.endObject().endObject()).execute().actionGet();
+.endObject().endObject().endObject()).execute().actionGet();

 numDocs = randomIntBetween(2, 100);
 precisionThreshold = randomIntBetween(0, 1 << randomInt(20));

@@ -145,12 +114,12 @@ public class CardinalityIT extends ESIntegTestCase {
 assertThat(count.getValue(), greaterThan(0L));
 }
 }
-private String singleNumericField(boolean hash) {
-return (randomBoolean() ? "l_value" : "d_value") + (hash ? ".hash" : "");
+private String singleNumericField() {
+return randomBoolean() ? "l_value" : "d_value";
 }

 private String multiNumericField(boolean hash) {
-return (randomBoolean() ? "l_values" : "d_values") + (hash ? ".hash" : "");
+return randomBoolean() ? "l_values" : "d_values";
 }

 @Test

@@ -195,24 +164,10 @@ public class CardinalityIT extends ESIntegTestCase {
 assertCount(count, numDocs);
 }

-@Test
-public void singleValuedStringHashed() throws Exception {
-SearchResponse response = client().prepareSearch("idx").setTypes("type")
-.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value.hash"))
-.execute().actionGet();
-
-assertSearchResponse(response);
-
-Cardinality count = response.getAggregations().get("cardinality");
-assertThat(count, notNullValue());
-assertThat(count.getName(), equalTo("cardinality"));
-assertCount(count, numDocs);
-}
-
 @Test
 public void singleValuedNumeric() throws Exception {
 SearchResponse response = client().prepareSearch("idx").setTypes("type")
-.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false)))
+.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()))
 .execute().actionGet();

 assertSearchResponse(response);

@@ -229,7 +184,7 @@ public class CardinalityIT extends ESIntegTestCase {
 SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
 .addAggregation(
 global("global").subAggregation(
-cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false))))
+cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())))
 .execute().actionGet();

 assertSearchResponse(searchResponse);

@@ -254,7 +209,7 @@ public class CardinalityIT extends ESIntegTestCase {
 @Test
 public void singleValuedNumericHashed() throws Exception {
 SearchResponse response = client().prepareSearch("idx").setTypes("type")
-.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(true)))
+.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()))
 .execute().actionGet();

 assertSearchResponse(response);

@@ -279,20 +234,6 @@ public class CardinalityIT extends ESIntegTestCase {
 assertCount(count, numDocs * 2);
 }

-@Test
-public void multiValuedStringHashed() throws Exception {
-SearchResponse response = client().prepareSearch("idx").setTypes("type")
-.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values.hash"))
-.execute().actionGet();
-
-assertSearchResponse(response);
-
-Cardinality count = response.getAggregations().get("cardinality");
-assertThat(count, notNullValue());
-assertThat(count.getName(), equalTo("cardinality"));
-assertCount(count, numDocs * 2);
-}
-
 @Test
 public void multiValuedNumeric() throws Exception {
 SearchResponse response = client().prepareSearch("idx").setTypes("type")

@@ -356,7 +297,7 @@ public class CardinalityIT extends ESIntegTestCase {
 SearchResponse response = client().prepareSearch("idx").setTypes("type")
 .addAggregation(
 cardinality("cardinality").precisionThreshold(precisionThreshold).script(
-new Script("doc['" + singleNumericField(false) + "'].value")))
+new Script("doc['" + singleNumericField() + "'].value")))
 .execute().actionGet();

 assertSearchResponse(response);

@@ -417,7 +358,7 @@ public class CardinalityIT extends ESIntegTestCase {
 public void singleValuedNumericValueScript() throws Exception {
 SearchResponse response = client().prepareSearch("idx").setTypes("type")
 .addAggregation(
-cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false))
+cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())
 .script(new Script("_value")))
 .execute().actionGet();

@@ -464,23 +405,4 @@ public class CardinalityIT extends ESIntegTestCase {
 }
 }

-@Test
-public void asSubAggHashed() throws Exception {
-SearchResponse response = client().prepareSearch("idx").setTypes("type")
-.addAggregation(terms("terms").field("str_value")
-.collectMode(randomFrom(SubAggCollectionMode.values()))
-.subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values.hash")))
-.execute().actionGet();
-
-assertSearchResponse(response);
-
-Terms terms = response.getAggregations().get("terms");
-for (Terms.Bucket bucket : terms.getBuckets()) {
-Cardinality count = bucket.getAggregations().get("cardinality");
-assertThat(count, notNullValue());
-assertThat(count.getName(), equalTo("cardinality"));
-assertCount(count, 2);
-}
-}
-
 }
@@ -574,7 +574,8 @@ public class DecayFunctionScoreIT extends ESIntegTestCase {
 "type",
 jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
 .endObject().startObject("date").field("type", "date").endObject().startObject("num").field("type", "double")
-.endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+.endObject().startObject("geo").field("type", "geo_point").field("coerce", true).endObject().endObject()
+.endObject().endObject()));
 ensureYellow();
 int numDocs = 200;
 List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
@ -289,50 +289,50 @@ public class GeoBoundingBoxIT extends ESIntegTestCase {
|
||||||
SearchResponse searchResponse = client().prepareSearch()
|
SearchResponse searchResponse = client().prepareSearch()
|
||||||
.setQuery(
|
.setQuery(
|
||||||
filteredQuery(matchAllQuery(),
|
filteredQuery(matchAllQuery(),
|
||||||
geoBoundingBoxQuery("location").topLeft(50, -180).bottomRight(-50, 180))
|
+                        geoBoundingBoxQuery("location").coerce(true).topLeft(50, -180).bottomRight(-50, 180))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(50, -180).bottomRight(-50, 180).type("indexed"))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(50, -180).bottomRight(-50, 180).type("indexed"))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(90, -180).bottomRight(-90, 180))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(90, -180).bottomRight(-90, 180))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(90, -180).bottomRight(-90, 180).type("indexed"))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(90, -180).bottomRight(-90, 180).type("indexed"))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));

         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(50, 0).bottomRight(-50, 360))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(50, 0).bottomRight(-50, 360))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(50, 0).bottomRight(-50, 360).type("indexed"))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(50, 0).bottomRight(-50, 360).type("indexed"))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(90, 0).bottomRight(-90, 360))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(90, 0).bottomRight(-90, 360))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
         searchResponse = client().prepareSearch()
                 .setQuery(
                         filteredQuery(matchAllQuery(),
-                                geoBoundingBoxQuery("location").topLeft(90, 0).bottomRight(-90, 360).type("indexed"))
+                                geoBoundingBoxQuery("location").coerce(true).topLeft(90, 0).bottomRight(-90, 360).type("indexed"))
                 ).execute().actionGet();
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
     }
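Note: every bounding-box filter in the hunk above gains `coerce(true)`, so boxes whose corners fall outside the valid latitude/longitude range (for example a longitude span of 0 to 360) are accepted and normalized rather than rejected. A minimal sketch of the query shape these assertions exercise, using only the builder calls that appear in this diff; the index name and the `Client` wiring are assumptions for illustration:

<pre>
import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

public class CoercedBoundingBoxSketch {
    // Whole-world box with coercion enabled, mirroring the assertions above.
    static SearchResponse wholeWorld(Client client) {
        return client.prepareSearch("test")            // hypothetical index name
                .setQuery(filteredQuery(matchAllQuery(),
                        geoBoundingBoxQuery("location").coerce(true)
                                .topLeft(90, -180).bottomRight(-90, 180)))
                .execute().actionGet();
    }
}
</pre>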
@@ -221,8 +221,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
     public void testDistanceSortingMVFields() throws Exception {
         XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
                 .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
-                .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
-                .endObject().endObject();
+                .field("ignore_malformed", true).field("coerce", true).startObject("fielddata")
+                .field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject().endObject().endObject();
         assertAcked(prepareCreate("test")
                 .addMapping("type1", xContentBuilder));
         ensureGreen();

@@ -233,6 +233,11 @@ public class GeoDistanceIT extends ESIntegTestCase {
                 .endObject()).execute().actionGet();

         client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+                .field("names", "New York 2")
+                .startObject("locations").field("lat", 400.7143528).field("lon", 285.9990269).endObject()
+                .endObject()).execute().actionGet();
+
+        client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
                 .field("names", "Times Square", "Tribeca")
                 .startArray("locations")
                 // to NY: 5.286 km

@@ -242,7 +247,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
                 .endArray()
                 .endObject()).execute().actionGet();

-        client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+        client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
                 .field("names", "Wall Street", "Soho")
                 .startArray("locations")
                 // to NY: 1.055 km

@@ -253,7 +258,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
                 .endObject()).execute().actionGet();


-        client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+        client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
                 .field("names", "Greenwich Village", "Brooklyn")
                 .startArray("locations")
                 // to NY: 2.029 km

@@ -270,70 +275,76 @@ public class GeoDistanceIT extends ESIntegTestCase {
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));

         // Order: Asc, Mode: max
         searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));

         // Order: Desc
         searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

         // Order: Desc, Mode: min
         searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

         searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));

         searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
                 .execute().actionGet();

-        assertHitCount(searchResponse, 4);
-        assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+        assertHitCount(searchResponse, 5);
+        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
         assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
         assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
-        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
+        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

         assertFailures(client().prepareSearch("test").setQuery(matchAllQuery())
                 .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("sum")),
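Note: the distance-sorting hunks above add a fifth document whose out-of-range coordinates (lat 400.71, lon 285.99) appear to be accepted only because the mapping now sets `ignore_malformed` and `coerce`; after normalization it sorts in at roughly 421 m from the reference point. A minimal sketch of the sort being asserted, reusing only calls shown in this diff; the index name and client wiring are assumptions:

<pre>
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

public class GeoDistanceSortSketch {
    // Sort multi-valued "locations" by distance from downtown NYC; the test above
    // also exercises the "max" and "avg" modes with both sort orders.
    static SearchResponse sortByNearest(Client client) {
        return client.prepareSearch("test")            // hypothetical index name
                .setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations")
                        .point(40.7143528, -74.0059731)
                        .sortMode("min")
                        .order(SortOrder.ASC))
                .execute().actionGet();
    }
}
</pre>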
@@ -1165,7 +1165,7 @@ public class SearchQueryIT extends ESIntegTestCase {
     }

     @Test
-    public void testFieldDatatermsQuery() throws Exception {
+    public void testTermsQuery() throws Exception {
         assertAcked(prepareCreate("test").addMapping("type", "str", "type=string", "lng", "type=long", "dbl", "type=double"));

         indexRandom(true,

@@ -1175,60 +1175,60 @@ public class SearchQueryIT extends ESIntegTestCase {
                 client().prepareIndex("test", "type", "4").setSource("str", "4", "lng", 4l, "dbl", 4.0d));

         SearchResponse searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "1", "4").execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "1", "4"))).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "1", "4");

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 3}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 3}))).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "2", "3");

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[]{2, 3}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[]{2, 3}))).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "2", "3");

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new int[] {1, 3}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new int[] {1, 3}))).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "1", "3");

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new float[] {2, 4}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new float[] {2, 4}))).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "2", "4");

         // test partial matching
         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "2", "5").execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "2", "5"))).get();
         assertNoFailures(searchResponse);
         assertHitCount(searchResponse, 1l);
         assertFirstHit(searchResponse, hasId("2"));

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {2, 5}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {2, 5}))).get();
         assertNoFailures(searchResponse);
         assertHitCount(searchResponse, 1l);
         assertFirstHit(searchResponse, hasId("2"));

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 5}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 5}))).get();
         assertNoFailures(searchResponse);
         assertHitCount(searchResponse, 1l);
         assertFirstHit(searchResponse, hasId("2"));

         // test valid type, but no matching terms
         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "5", "6").execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "5", "6"))).get();
         assertHitCount(searchResponse, 0l);

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {5, 6}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {5, 6}))).get();
         assertHitCount(searchResponse, 0l);

         searchResponse = client().prepareSearch("test")
-                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {5, 6}).execution("fielddata"))).get();
+                .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {5, 6}))).get();
         assertHitCount(searchResponse, 0l);
     }
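Note: in the hunk above the explicit `execution("fielddata")` hint disappears from every terms query; the assertions now run against the plain `termsQuery` builder. A minimal sketch of the simplified form, with index, field and client wiring taken from the test and shown here only for illustration:

<pre>
import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termsQuery;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

public class TermsQuerySketch {
    // Same shape as the first assertion above: match documents whose "str" field is "1" or "4".
    static SearchResponse strOneOrFour(Client client) {
        return client.prepareSearch("test")
                .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "1", "4")))
                .get();
    }
}
</pre>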
@@ -28,13 +28,13 @@ import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryPlugin;
 import org.elasticsearch.test.ESIntegTestCase;

 import java.io.IOException;

@@ -57,7 +57,10 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
-                .extendArray("plugin.types", MockRepositoryPlugin.class.getName()).build();
+                // Rebalancing is causing some checks after restore to randomly fail
+                // due to https://github.com/elastic/elasticsearch/issues/9421
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
+                .extendArray("plugin.types", MockRepository.Plugin.class.getName()).build();
     }

     public static long getFailureCount(String repository) {
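Note: the new `nodeSettings` override above does two things: it turns cluster rebalancing off for these snapshot tests (see the linked issue) and loads the mock repository through the relocated `MockRepository.Plugin` class. A compact sketch of that settings shape, using only the calls visible in the hunk; the helper method and its parameters are illustrative:

<pre>
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.settings.Settings;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

public class SnapshotTestSettingsSketch {
    // Rebalancing disabled, mock repository plugin registered on top of the base node settings.
    static Settings nodeSettings(Settings base, String pluginClassName) {
        return settingsBuilder().put(base)
                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
                .extendArray("plugin.types", pluginClassName)
                .build();
    }
}
</pre>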
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.IntSet;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.util.concurrent.ListenableFuture;
-
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ListenableActionFuture;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;

@@ -41,8 +40,8 @@ import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
-import org.elasticsearch.cluster.metadata.MetaData.Custom;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaData.Custom;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.Nullable;

@@ -64,11 +63,9 @@ import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
 import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryPlugin;
+import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.rest.FakeRestRequest;
-import org.junit.Ignore;
 import org.junit.Test;

 import java.io.IOException;

@@ -88,7 +85,15 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
-import static org.hamcrest.Matchers.*;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;

 /**
  */

@@ -615,7 +620,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     @Test
     public void registrationFailureTest() {
         logger.info("--> start first node");
-        internalCluster().startNode(settingsBuilder().put("plugin.types", MockRepositoryPlugin.class.getName()));
+        internalCluster().startNode(settingsBuilder().put("plugin.types", MockRepository.Plugin.class.getName()));
         logger.info("--> start second node");
         // Make sure the first node is elected as master
         internalCluster().startNode(settingsBuilder().put("node.master", false));

@@ -634,7 +639,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest

     @Test
     public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception {
-        Settings nodeSettings = settingsBuilder().put("plugin.types", MockRepositoryPlugin.class.getName()).build();
+        Settings nodeSettings = settingsBuilder().put("plugin.types", MockRepository.Plugin.class.getName()).build();
         logger.info("--> start two nodes");
         internalCluster().startNodesAsync(2, nodeSettings).get();
         // Register mock repositories
@@ -32,15 +32,12 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.repositories.RepositoryException;
 import org.elasticsearch.repositories.RepositoryVerificationException;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryPlugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.junit.Test;

 import java.nio.file.Path;
 import java.util.List;

-import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
 import static org.hamcrest.Matchers.containsString;
@@ -64,7 +64,6 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.indices.InvalidIndexNameException;
 import org.elasticsearch.repositories.RepositoriesService;
-import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.junit.Test;

@@ -308,7 +307,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

         logger.info("--> create index with foo type");
         assertAcked(prepareCreate("test-idx", 2, Settings.builder()
                 .put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10, TimeUnit.SECONDS)));

         NumShards numShards = getNumShards("test-idx");

@@ -323,7 +322,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         logger.info("--> delete the index and recreate it with bar type");
         cluster().wipeIndices("test-idx");
         assertAcked(prepareCreate("test-idx", 2, Settings.builder()
                 .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5, TimeUnit.SECONDS)));
         assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string"));
         ensureGreen();

@@ -996,7 +995,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     }

     @Test
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/12855")
     public void renameOnRestoreTest() throws Exception {
         Client client = client();
@@ -19,10 +19,8 @@

 package org.elasticsearch.snapshots.mockstore;

-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.SnapshotId;

@@ -30,11 +28,17 @@ import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.repositories.RepositoriesModule;
 import org.elasticsearch.repositories.RepositoryName;
 import org.elasticsearch.repositories.RepositorySettings;
 import org.elasticsearch.repositories.fs.FsRepository;

@@ -46,6 +50,10 @@ import java.io.UnsupportedEncodingException;
 import java.nio.file.Path;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;

@@ -54,10 +62,48 @@ import java.util.concurrent.atomic.AtomicLong;

 import static org.elasticsearch.common.settings.Settings.settingsBuilder;

-/**
- */
 public class MockRepository extends FsRepository {

+    public static class Plugin extends AbstractPlugin {
+
+        @Override
+        public String name() {
+            return "mock-repository";
+        }
+
+        @Override
+        public String description() {
+            return "Mock Repository";
+        }
+
+        public void onModule(RepositoriesModule repositoriesModule) {
+            repositoriesModule.registerRepository("mock", MockRepository.class, BlobStoreIndexShardRepository.class);
+        }
+
+        @Override
+        public Collection<Class<? extends Module>> modules() {
+            Collection<Class<? extends Module>> modules = new ArrayList<>();
+            modules.add(SettingsFilteringModule.class);
+            return modules;
+        }
+
+        public static class SettingsFilteringModule extends AbstractModule {
+
+            @Override
+            protected void configure() {
+                bind(SettingsFilteringService.class).asEagerSingleton();
+            }
+        }
+
+        public static class SettingsFilteringService {
+            @Inject
+            public SettingsFilteringService(SettingsFilter settingsFilter) {
+                settingsFilter.addFilter("secret.mock.password");
+            }
+        }
+
+    }
+
     private final AtomicLong failureCounter = new AtomicLong();

     public long getFailureCount() {
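Note: with `MockRepositoryPlugin` folded into `MockRepository` as the nested `Plugin` class above (the old standalone class is deleted in the next hunk), tests load it by name through the `plugin.types` node setting, as the snapshot test hunks earlier in this commit show. A one-method sketch of that wiring; the method name is illustrative:

<pre>
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.mockstore.MockRepository;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

public class MockRepositoryWiringSketch {
    // Node settings that register the relocated plugin class.
    static Settings withMockRepository() {
        return settingsBuilder()
                .put("plugin.types", MockRepository.Plugin.class.getName())
                .build();
    }
}
</pre>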
@@ -1,71 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.snapshots.mockstore;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.plugins.AbstractPlugin;
-import org.elasticsearch.repositories.RepositoriesModule;
-
-import java.util.Collection;
-
-import static com.google.common.collect.Lists.newArrayList;
-
-public class MockRepositoryPlugin extends AbstractPlugin {
-
-    @Override
-    public String name() {
-        return "mock-repository";
-    }
-
-    @Override
-    public String description() {
-        return "Mock Repository";
-    }
-
-    public void onModule(RepositoriesModule repositoriesModule) {
-        repositoriesModule.registerRepository("mock", MockRepositoryModule.class);
-    }
-
-    @Override
-    public Collection<Class<? extends Module>> modules() {
-        Collection<Class<? extends Module>> modules = newArrayList();
-        modules.add(SettingsFilteringModule.class);
-        return modules;
-    }
-
-    public static class SettingsFilteringModule extends AbstractModule {
-
-        @Override
-        protected void configure() {
-            bind(SettingsFilteringService.class).asEagerSingleton();
-        }
-    }
-
-    public static class SettingsFilteringService {
-        @Inject
-        public SettingsFilteringService(SettingsFilter settingsFilter) {
-            settingsFilter.addFilter("secret.mock.password");
-        }
-    }
-
-}
@@ -108,6 +108,7 @@ import org.junit.Assert;

 import java.io.Closeable;
 import java.io.IOException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.file.Path;
 import java.util.ArrayList;

@@ -504,7 +505,6 @@ public final class InternalTestCluster extends TestCluster {
     public static String clusterName(String prefix, long clusterSeed) {
         StringBuilder builder = new StringBuilder(prefix);
         final int childVM = RandomizedTest.systemPropertyAsInt(SysGlobals.CHILDVM_SYSPROP_JVM_ID, 0);
-        builder.append('-').append(NetworkUtils.getLocalHostName("__default_host__"));
         builder.append("-CHILD_VM=[").append(childVM).append(']');
         builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']');
         // if multiple maven task run on a single host we better have an identifier that doesn't rely on input params
@@ -156,8 +156,8 @@ public class ReproduceInfoPrinter extends RunListener {

     public ReproduceErrorMessageBuilder appendESProperties() {
         appendProperties("es.logger.level");
-        if (!inVerifyPhase()) {
-            // these properties only make sense for unit tests
+        if (inVerifyPhase()) {
+            // these properties only make sense for integration tests
             appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES);
         }
         appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms",
@@ -135,29 +135,6 @@ public class NettyTransportMultiPortTests extends ESTestCase {
         }
     }

-    @Test
-    public void testThatBindingOnDifferentHostsWorks() throws Exception {
-        int[] ports = getRandomPorts(2);
-        InetAddress firstNonLoopbackAddress = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
-        assumeTrue("No IP-v4 non-loopback address available - are you on a plane?", firstNonLoopbackAddress != null);
-        Settings settings = settingsBuilder()
-                .put("network.host", "127.0.0.1")
-                .put("transport.tcp.port", ports[0])
-                .put("transport.profiles.default.bind_host", "127.0.0.1")
-                .put("transport.profiles.client1.bind_host", firstNonLoopbackAddress.getHostAddress())
-                .put("transport.profiles.client1.port", ports[1])
-                .build();
-
-        ThreadPool threadPool = new ThreadPool("tst");
-        try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
-            assertPortIsBound("127.0.0.1", ports[0]);
-            assertPortIsBound(firstNonLoopbackAddress.getHostAddress(), ports[1]);
-            assertConnectionRefused(ports[1]);
-        } finally {
-            terminate(threadPool);
-        }
-    }
-
     @Test
     public void testThatProfileWithoutValidNameIsIgnored() throws Exception {
         int[] ports = getRandomPorts(3);
@@ -1,9 +1,9 @@
 <project>
     <modelVersion>4.0.0</modelVersion>
     <groupId>org.elasticsearch</groupId>
-    <artifactId>elasticsearch-dev-tools</artifactId>
+    <artifactId>dev-tools</artifactId>
     <version>2.1.0-SNAPSHOT</version>
-    <name>Elasticsearch Build Resources</name>
+    <name>Build Tools and Resources</name>
     <description>Tools to assist in building and developing in the Elasticsearch project</description>
     <parent>
         <groupId>org.sonatype.oss</groupId>

@@ -124,7 +124,7 @@
         <waitfor maxwait="30" maxwaitunit="second"
                  checkevery="500" checkeveryunit="millisecond"
                  timeoutproperty="@{timeoutproperty}">
-          <http url="http://127.0.0.1:@{port}"/>
+          <http url="http://localhost:@{port}"/>
         </waitfor>
       </sequential>
     </macrodef>

@@ -138,7 +138,7 @@
         <waitfor maxwait="30" maxwaitunit="second"
                  checkevery="500" checkeveryunit="millisecond"
                  timeoutproperty="@{timeoutproperty}">
-          <http url="http://127.0.0.1:@{port}/_cluster/health?wait_for_nodes=2"/>
+          <http url="http://localhost:@{port}/_cluster/health?wait_for_nodes=2"/>
         </waitfor>
       </sequential>
     </macrodef>
@@ -18,6 +18,9 @@
 java.net.URL#getPath()
 java.net.URL#getFile()

+@defaultMessage Usage of getLocalHost is discouraged
+java.net.InetAddress#getLocalHost()
+
 @defaultMessage Use java.nio.file instead of java.io.File API
 java.util.jar.JarFile
 java.util.zip.ZipFile
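Note: the forbidden-APIs entry added above bans `InetAddress#getLocalHost()`, in line with the `localhost`/`127.0.0.1` cleanups elsewhere in this commit. The signatures file itself does not prescribe a replacement; as one hedged illustration, code that only needs a local address can reach for the explicit loopback instead:

<pre>
import java.net.InetAddress;

public class LoopbackSketch {
    // Explicit loopback address; avoids the host-name resolution that getLocalHost() may perform.
    static InetAddress localAddress() {
        return InetAddress.getLoopbackAddress();
    }
}
</pre>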
@@ -30,6 +30,7 @@ $Source = File::Spec->rel2abs($Source);

 say "LICENSE DIR: $License_Dir";
 say "SOURCE: $Source";
+say "IGNORE: $Ignore";

 die "License dir is not a directory: $License_Dir\n" . usage()
     unless -d $License_Dir;
@ -5,13 +5,13 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch.distribution</groupId>
|
<groupId>org.elasticsearch.distribution</groupId>
|
||||||
<artifactId>elasticsearch-distribution</artifactId>
|
<artifactId>distributions</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch.distribution.deb</groupId>
|
<groupId>org.elasticsearch.distribution.deb</groupId>
|
||||||
<artifactId>elasticsearch</artifactId>
|
<artifactId>elasticsearch</artifactId>
|
||||||
<name>Elasticsearch DEB Distribution</name>
|
<name>Distribution: Deb</name>
|
||||||
<!--
|
<!--
|
||||||
We should use deb packaging here because we don't want to publish any jar.
|
We should use deb packaging here because we don't want to publish any jar.
|
||||||
But if you do this, then maven lifecycle does not execute any test (nor compile any test)
|
But if you do this, then maven lifecycle does not execute any test (nor compile any test)
|
||||||
|
|
|
@ -5,13 +5,13 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch.distribution</groupId>
|
<groupId>org.elasticsearch.distribution</groupId>
|
||||||
<artifactId>elasticsearch-distribution</artifactId>
|
<artifactId>distributions</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
|
<groupId>org.elasticsearch.distribution.fully-loaded</groupId>
|
||||||
<artifactId>elasticsearch</artifactId>
|
<artifactId>elasticsearch</artifactId>
|
||||||
<name>Elasticsearch with all optional dependencies</name>
|
<name>Distribution: with all optional dependencies</name>
|
||||||
<packaging>pom</packaging>
|
<packaging>pom</packaging>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
|
|
|
@ -5,14 +5,14 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch</groupId>
|
<groupId>org.elasticsearch</groupId>
|
||||||
<artifactId>elasticsearch-parent</artifactId>
|
<artifactId>parent</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch.distribution</groupId>
|
<groupId>org.elasticsearch.distribution</groupId>
|
||||||
<artifactId>elasticsearch-distribution</artifactId>
|
<artifactId>distributions</artifactId>
|
||||||
<packaging>pom</packaging>
|
<packaging>pom</packaging>
|
||||||
<name>Elasticsearch Distribution</name>
|
<name>Distribution: Parent POM</name>
|
||||||
|
|
||||||
<properties>
|
<properties>
|
||||||
<!-- Properties used for building RPM & DEB packages (see common/packaging.properties) -->
|
<!-- Properties used for building RPM & DEB packages (see common/packaging.properties) -->
|
||||||
|
@ -153,7 +153,7 @@
|
||||||
<parallelism>1</parallelism>
|
<parallelism>1</parallelism>
|
||||||
<systemProperties>
|
<systemProperties>
|
||||||
<!-- use external cluster -->
|
<!-- use external cluster -->
|
||||||
<tests.cluster>127.0.0.1:${integ.transport.port}</tests.cluster>
|
<tests.cluster>localhost:${integ.transport.port}</tests.cluster>
|
||||||
</systemProperties>
|
</systemProperties>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
|
|
|
@ -5,13 +5,13 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch.distribution</groupId>
|
<groupId>org.elasticsearch.distribution</groupId>
|
||||||
<artifactId>elasticsearch-distribution</artifactId>
|
<artifactId>distributions</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch.distribution.rpm</groupId>
|
<groupId>org.elasticsearch.distribution.rpm</groupId>
|
||||||
<artifactId>elasticsearch</artifactId>
|
<artifactId>elasticsearch</artifactId>
|
||||||
<name>Elasticsearch RPM Distribution</name>
|
<name>Distribution: RPM</name>
|
||||||
<packaging>rpm</packaging>
|
<packaging>rpm</packaging>
|
||||||
<description>The RPM distribution of Elasticsearch</description>
|
<description>The RPM distribution of Elasticsearch</description>
|
||||||
|
|
||||||
|
|
|
@ -5,13 +5,13 @@
|
||||||
<modelVersion>4.0.0</modelVersion>
|
<modelVersion>4.0.0</modelVersion>
|
||||||
<parent>
|
<parent>
|
||||||
<groupId>org.elasticsearch.distribution</groupId>
|
<groupId>org.elasticsearch.distribution</groupId>
|
||||||
<artifactId>elasticsearch-distribution</artifactId>
|
<artifactId>distributions</artifactId>
|
||||||
<version>2.1.0-SNAPSHOT</version>
|
<version>2.1.0-SNAPSHOT</version>
|
||||||
</parent>
|
</parent>
|
||||||
|
|
||||||
<groupId>org.elasticsearch.distribution.shaded</groupId>
|
<groupId>org.elasticsearch.distribution.shaded</groupId>
|
||||||
<artifactId>elasticsearch</artifactId>
|
<artifactId>elasticsearch</artifactId>
|
||||||
<name>Elasticsearch Shaded Distribution</name>
|
<name>Distribution: Shaded JAR</name>
|
||||||
|
|
||||||
<dependencies>
|
<dependencies>
|
||||||
<dependency>
|
<dependency>
|
||||||
|
|
|
@ -47,7 +47,7 @@
|
||||||
# hasn't been done, we assume that this is not a packaged version and the
|
# hasn't been done, we assume that this is not a packaged version and the
|
||||||
# user has forgotten to run Maven to create a package.
|
# user has forgotten to run Maven to create a package.
|
||||||
IS_PACKAGED_VERSION='${project.parent.artifactId}'
|
IS_PACKAGED_VERSION='${project.parent.artifactId}'
|
||||||
if [ "$IS_PACKAGED_VERSION" != "elasticsearch-distribution" ]; then
|
if [ "$IS_PACKAGED_VERSION" != "distributions" ]; then
|
||||||
cat >&2 << EOF
|
cat >&2 << EOF
|
||||||
Error: You must build the project with Maven or download a pre-built package
|
Error: You must build the project with Maven or download a pre-built package
|
||||||
before you can run Elasticsearch. See 'Building from Source' in README.textile
|
before you can run Elasticsearch. See 'Building from Source' in README.textile
|
||||||
|
|
|
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.elasticsearch.distribution</groupId>
-        <artifactId>elasticsearch-distribution</artifactId>
+        <artifactId>distributions</artifactId>
         <version>2.1.0-SNAPSHOT</version>
     </parent>
 
     <groupId>org.elasticsearch.distribution.tar</groupId>
     <artifactId>elasticsearch</artifactId>
-    <name>Elasticsearch TAR Distribution</name>
+    <name>Distribution: TAR</name>
     <!--
         We should use pom packaging here because we don't want to publish any jar.
         But if you do this, then maven lifecycle does not execute any test (nor compile any test)
@@ -5,13 +5,13 @@
     <modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.elasticsearch.distribution</groupId>
-        <artifactId>elasticsearch-distribution</artifactId>
+        <artifactId>distributions</artifactId>
         <version>2.1.0-SNAPSHOT</version>
     </parent>
 
     <groupId>org.elasticsearch.distribution.zip</groupId>
     <artifactId>elasticsearch</artifactId>
-    <name>Elasticsearch ZIP Distribution</name>
+    <name>Distribution: ZIP</name>
     <!--
         We should use pom packaging here because we don't want to publish any jar.
         But if you do this, then maven lifecycle does not execute any test (nor compile any test)
@@ -15,12 +15,6 @@ The delete by query plugin adds support for deleting all of the documents
 replacement for the problematic _delete-by-query_ functionality which has been
 removed from Elasticsearch core.
 
-https://github.com/elasticsearch/elasticsearch-mapper-attachments[Mapper Attachments Type plugin]::
-
-Integrates http://lucene.apache.org/tika/[Apache Tika] to provide a new field
-type `attachment` to allow indexing of documents such as PDFs and Microsoft
-Word.
-
 [float]
 === Community contributed API extension plugins
 
@@ -259,8 +259,8 @@ The following settings are supported:
 
 `base_path`::
 
-    Specifies the path within bucket to repository data. Defaults to root
-    directory.
+    Specifies the path within bucket to repository data. Defaults to
+    value of `repositories.s3.base_path` or to root directory if not set.
 
 `access_key`::
 
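To illustrate the `base_path` setting documented in this hunk, a snapshot repository might be registered as in the sketch below; the repository name, bucket, and path are hypothetical, and the `cloud-aws` plugin must already be installed on the node:

[source,sh]
----------------------------------------------------------------
# Register an S3 snapshot repository with an explicit base_path
# (names are illustrative only).
curl -XPUT 'http://localhost:9200/_snapshot/my_s3_repository' -d '{
  "type": "s3",
  "settings": {
    "bucket": "my-snapshot-bucket",
    "base_path": "elasticsearch/snapshots"
  }
}'
----------------------------------------------------------------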
@@ -13,7 +13,7 @@ This plugin can be installed using the plugin manager:
 
 [source,sh]
 ----------------------------------------------------------------
-sudo bin/plugin install cloud-aws
+sudo bin/plugin install cloud-azure
 ----------------------------------------------------------------
 
 The plugin must be installed on every node in the cluster, and each node must
@@ -27,7 +27,7 @@ The plugin can be removed with the following command:
 
 [source,sh]
 ----------------------------------------------------------------
-sudo bin/plugin remove cloud-aws
+sudo bin/plugin remove cloud-azure
 ----------------------------------------------------------------
 
 The node must be stopped before removing the plugin.
@@ -0,0 +1,101 @@
+[[mapper-murmur3]]
+=== Mapper Murmur3 Plugin
+
+The mapper-murmur3 plugin provides the ability to compute hash of field values
+at index-time and store them in the index. This can sometimes be helpful when
+running cardinality aggregations on high-cardinality and large string fields.
+
+[[mapper-murmur3-install]]
+[float]
+==== Installation
+
+This plugin can be installed using the plugin manager:
+
+[source,sh]
+----------------------------------------------------------------
+sudo bin/plugin install mapper-murmur3
+----------------------------------------------------------------
+
+The plugin must be installed on every node in the cluster, and each node must
+be restarted after installation.
+
+[[mapper-murmur3-remove]]
+[float]
+==== Removal
+
+The plugin can be removed with the following command:
+
+[source,sh]
+----------------------------------------------------------------
+sudo bin/plugin remove mapper-murmur3
+----------------------------------------------------------------
+
+The node must be stopped before removing the plugin.
+
+[[mapper-murmur3-usage]]
+==== Using the `murmur3` field
+
+The `murmur3` field is typically used within a multi-field, so that both the original
+value and its hash are stored in the index:
+
+[source,js]
+--------------------------
+PUT my_index
+{
+  "mappings": {
+    "my_type": {
+      "properties": {
+        "my_field": {
+          "type": "string",
+          "fields": {
+            "hash": {
+              "type": "murmur3"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------
+// AUTOSENSE
+
+Such a mapping would allow to refer to `my_field.hash` in order to get hashes
+of the values of the `my_field` field. This is only useful in order to run
+`cardinality` aggregations:
+
+[source,js]
+--------------------------
+# Example documents
+PUT my_index/my_type/1
+{
+  "my_field": "This is a document"
+}
+
+PUT my_index/my_type/2
+{
+  "my_field": "This is another document"
+}
+
+GET my_index/_search
+{
+  "aggs": {
+    "my_field_cardinality": {
+      "cardinality": {
+        "field": "my_field.hash" <1>
+      }
+    }
+  }
+}
+--------------------------
+// AUTOSENSE
+
+<1> Counting unique values on the `my_field.hash` field
+
+Running a `cardinality` aggregation on the `my_field` field directly would
+yield the same result, however using `my_field.hash` instead might result in
+a speed-up if the field has a high-cardinality. On the other hand, it is
+discouraged to use the `murmur3` field on numeric fields and string fields
+that are not almost unique as the use of a `murmur3` field is unlikely to
+bring significant speed-ups, while increasing the amount of disk space required
+to store the index.
@@ -8,11 +8,22 @@ Mapper plugins allow new field datatypes to be added to Elasticsearch.
 
 The core mapper plugins are:
 
+https://github.com/elasticsearch/elasticsearch-mapper-attachments[Mapper Attachments Type plugin]::
+
+Integrates http://lucene.apache.org/tika/[Apache Tika] to provide a new field
+type `attachment` to allow indexing of documents such as PDFs and Microsoft
+Word.
+
 <<mapper-size>>::
 
 The mapper-size plugin provides the `_size` meta field which, when enabled,
 indexes the size in bytes of the original
 {ref}/mapping-source-field.html[`_source`] field.
 
+<<mapper-murmur3>>::
+
+The mapper-murmur3 plugin allows hashes to be computed at index-time and stored
+in the index for later use with the `cardinality` aggregation.
+
 include::mapper-size.asciidoc[]
+include::mapper-murmur3.asciidoc[]
@@ -223,18 +223,3 @@ plugin.mandatory: mapper-attachments,lang-groovy
 
 For safety reasons, a node will not start if it is missing a mandatory plugin.
 
-[float]
-=== Lucene version dependent plugins
-
-For some plugins, such as analysis plugins, a specific major Lucene version is
-required to run. In that case, the plugin provides in its
-`es-plugin.properties` file the Lucene version for which the plugin was built for.
-
-If present at startup the node will check the Lucene version before loading
-the plugin. You can disable that check using
-
-[source,yaml]
---------------------------------------------------
-plugins.check_lucene: false
---------------------------------------------------
@@ -23,9 +23,9 @@ match a query:
 
 ==== Precision control
 
-This aggregation also supports the `precision_threshold` and `rehash` options:
+This aggregation also supports the `precision_threshold` option:
 
-experimental[The `precision_threshold` and `rehash` options are specific to the current internal implementation of the `cardinality` agg, which may change in the future]
+experimental[The `precision_threshold` option is specific to the current internal implementation of the `cardinality` agg, which may change in the future]
 
 [source,js]
 --------------------------------------------------
@@ -34,8 +34,7 @@ experimental[The `precision_threshold` and `rehash` options are specific to the
         "author_count" : {
             "cardinality" : {
                 "field" : "author_hash",
-                "precision_threshold": 100, <1>
-                "rehash": false <2>
+                "precision_threshold": 100 <1>
             }
         }
     }
@@ -49,11 +48,6 @@ supported value is 40000, thresholds above this number will have the same
 effect as a threshold of 40000.
 Default value depends on the number of parent aggregations that multiple
 create buckets (such as terms or histograms).
-<2> If you computed a hash on client-side, stored it into your documents and want
-Elasticsearch to use them to compute counts using this hash function without
-rehashing values, it is possible to specify `rehash: false`. Default value is
-`true`. Please note that the hash must be indexed as a long when `rehash` is
-false.
 
 ==== Counts are approximate
 
@@ -86,47 +80,11 @@ counting millions of items.
 
 ==== Pre-computed hashes
 
-If you don't want Elasticsearch to re-compute hashes on every run of this
-aggregation, it is possible to use pre-computed hashes, either by computing a
-hash on client-side, indexing it and specifying `rehash: false`, or by using
-the special `murmur3` field mapper, typically in the context of a `multi-field`
-in the mapping:
-
-[source,js]
---------------------------------------------------
-{
-    "author": {
-        "type": "string",
-        "fields": {
-            "hash": {
-                "type": "murmur3"
-            }
-        }
-    }
-}
---------------------------------------------------
-
-With such a mapping, Elasticsearch is going to compute hashes of the `author`
-field at indexing time and store them in the `author.hash` field. This
-way, unique counts can be computed using the cardinality aggregation by only
-loading the hashes into memory, not the values of the `author` field, and
-without computing hashes on the fly:
-
-[source,js]
---------------------------------------------------
-{
-    "aggs" : {
-        "author_count" : {
-            "cardinality" : {
-                "field" : "author.hash"
-            }
-        }
-    }
-}
---------------------------------------------------
-
-NOTE: `rehash` is automatically set to `false` when computing unique counts on
-a `murmur3` field.
-
+On string fields that have a high cardinality, it might be faster to store the
+hash of your field values in your index and then run the cardinality aggregation
+on this field. This can either be done by providing hash values from client-side
+or by letting Elasticsearch compute hash values for you by using the
+{plugins}/mapper-murmur3.html[`mapper-murmur3`] plugin.
+
 NOTE: Pre-computing hashes is usually only useful on very large and/or
 high-cardinality fields as it saves CPU and memory. However, on numeric
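As a sketch of the approach the new text describes, assuming a `murmur3` hash sub-field named `author.hash` already exists in the mapping (index and field names are assumptions for illustration), the cardinality aggregation is simply pointed at the hash field instead of the original string field:

[source,sh]
----------------------------------------------------------------
# Count unique authors using the pre-computed hash sub-field.
curl -XGET 'http://localhost:9200/my_index/_search' -d '{
  "size": 0,
  "aggs": {
    "author_count": {
      "cardinality": {
        "field": "author.hash"
      }
    }
  }
}'
----------------------------------------------------------------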
@@ -114,7 +114,7 @@ If both `doc` and `script` is specified, then `doc` is ignored. Best is
 to put your field pairs of the partial document in the script itself.
 
 [float]
-=== `detect_noop`
+=== Detecting noop
 
 By default if `doc` is specified then the document is always updated even
 if the merging process doesn't cause any changes. Specifying `detect_noop`
|
||||||
|
|
||||||
`version` & `version_type`::
|
`version` & `version_type`::
|
||||||
|
|
||||||
The Update API uses the Elasticsearch's versioning support internally to make
|
The update API uses the Elasticsearch's versioning support internally to make
|
||||||
sure the document doesn't change during the update. You can use the `version`
|
sure the document doesn't change during the update. You can use the `version`
|
||||||
parameter to specify that the document should only be updated if it's version
|
parameter to specify that the document should only be updated if it's version
|
||||||
matches the one specified. By setting version type to `force` you can force
|
matches the one specified. By setting version type to `force` you can force
|
||||||
the new version of the document after update (use with care! with `force`
|
the new version of the document after update (use with care! with `force`
|
||||||
there is no guarantee the document didn't change).Version types `external` &
|
there is no guarantee the document didn't change).Version types `external` &
|
||||||
`external_gte` are not supported.
|
`external_gte` are not supported.
|
||||||
|
|
||||||
|
|
||||||
|
|
|
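For illustration of the `version` parameter (document coordinates are hypothetical), an update can be made conditional on the currently stored version:

[source,sh]
----------------------------------------------------------------
# The update only applies if the stored document is still at version 2;
# otherwise a version conflict is returned.
curl -XPOST 'http://localhost:9200/test/type1/1/_update?version=2' -d '{
  "doc": {
    "name": "new_name"
  }
}'
----------------------------------------------------------------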
@@ -33,6 +33,7 @@ document:
 <<search-suggesters-completion,Completion datatype>>::
 `completion` to provide auto-complete suggestions
 <<token-count>>:: `token_count` to count the number of tokens in a string
+{plugins}/mapper-murmur3.html[`mapper-murmur3`]:: `murmur3` to compute hashes of values at index-time and store them in the index
 
 Attachment datatype::
 
@@ -74,3 +74,7 @@ The following deprecated methods have been removed:
 The redundant BytesQueryBuilder has been removed in favour of the
 WrapperQueryBuilder internally.
 
+==== TermsQueryBuilder execution removed
+
+The `TermsQueryBuilder#execution` method has been removed as it has no effect, it is ignored by the
+corresponding parser.
@@ -16,6 +16,11 @@ Facets, deprecated since 1.0, have now been removed. Instead, use the much
 more powerful and flexible <<search-aggregations,aggregations>> framework.
 This also means that Kibana 3 will not work with Elasticsearch 2.0.
 
+==== MVEL has been removed
+
+The MVEL scripting language has been removed. The default scripting language
+is now Groovy.
+
 ==== Delete-by-query is now a plugin
 
 The old delete-by-query functionality was fast but unsafe. It could lead to
@@ -41,6 +46,16 @@ can install the plugin with:
 The `_shutdown` API has been removed without a replacement. Nodes should be
 managed via the operating system and the provided start/stop scripts.
 
+==== `murmur3` is now a plugin
+
+The `murmur3` field, which indexes hashes of the field values, has been moved
+out of core and is available as a plugin. It can be installed as:
+
+[source,sh]
+------------------
+./bin/plugin install mapper-murmur3
+------------------
+
 ==== `_size` is now a plugin
 
 The `_size` meta-data field, which indexes the size in bytes of the original
@@ -38,7 +38,7 @@ respond to. It provides the following settings with the
 |`ttl` |The ttl of the multicast message. Defaults to `3`.
 
 |`address` |The address to bind to, defaults to `null` which means it
-will bind to all available network interfaces.
+will bind to `network.bind_host`.
 
 |`enabled` |Whether multicast ping discovery is enabled. Defaults to `true`.
 |=======================================================================
@@ -9,13 +9,15 @@ network settings allows to set common settings that will be shared among
 all network based modules (unless explicitly overridden in each module).
 
 The `network.bind_host` setting allows to control the host different network
-components will bind on. By default, the bind host will be `anyLoopbackAddress`
-(typically `127.0.0.1` or `::1`).
+components will bind on. By default, the bind host will be `_local_`
+(loopback addresses such as `127.0.0.1`, `::1`).
 
 The `network.publish_host` setting allows to control the host the node will
 publish itself within the cluster so other nodes will be able to connect to it.
-Of course, this can't be the `anyLocalAddress`, and by default, it will be the
-first loopback address (if possible), or the local address.
+Currently an elasticsearch node may be bound to multiple addresses, but only
+publishes one. If not specified, this defaults to the "best" address from
+`network.bind_host`. By default, IPv4 addresses are preferred to IPv6, and
+ordinary addresses are preferred to site-local or link-local addresses.
 
 The `network.host` setting is a simple setting to automatically set both
 `network.bind_host` and `network.publish_host` to the same host value.
@@ -27,21 +29,25 @@ in the following table:
 [cols="<,<",options="header",]
 |=======================================================================
 |Logical Host Setting Value |Description
-|`_local_` |Will be resolved to the local ip address.
+|`_local_` |Will be resolved to loopback addresses
 
-|`_non_loopback_` |The first non loopback address.
+|`_local:ipv4_` |Will be resolved to loopback IPv4 addresses
 
-|`_non_loopback:ipv4_` |The first non loopback IPv4 address.
+|`_local:ipv6_` |Will be resolved to loopback IPv6 addresses
 
-|`_non_loopback:ipv6_` |The first non loopback IPv6 address.
+|`_non_loopback_` |Addresses of the first non loopback interface
 
-|`_[networkInterface]_` |Resolves to the ip address of the provided
+|`_non_loopback:ipv4_` |IPv4 addresses of the first non loopback interface
+
+|`_non_loopback:ipv6_` |IPv6 addresses of the first non loopback interface
+
+|`_[networkInterface]_` |Resolves to the addresses of the provided
 network interface. For example `_en0_`.
 
-|`_[networkInterface]:ipv4_` |Resolves to the ipv4 address of the
+|`_[networkInterface]:ipv4_` |Resolves to the ipv4 addresses of the
 provided network interface. For example `_en0:ipv4_`.
 
-|`_[networkInterface]:ipv6_` |Resolves to the ipv6 address of the
+|`_[networkInterface]:ipv6_` |Resolves to the ipv6 addresses of the
 provided network interface. For example `_en0:ipv6_`.
 |=======================================================================
 
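As a sketch of how the special values above might be used (the command-line flags are an assumption; the same keys can equally be set in `elasticsearch.yml`):

[source,sh]
----------------------------------------------------------------
# Bind and publish on the first non-loopback interface.
bin/elasticsearch -Des.network.host=_non_loopback_

# Bind on the IPv4 addresses of en0, publish a specific address
# (the interface name and address are illustrative).
bin/elasticsearch -Des.network.bind_host=_en0:ipv4_ -Des.network.publish_host=192.168.1.5
----------------------------------------------------------------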
@@ -44,6 +44,25 @@ Then the following simple query can be executed with a
 }
 --------------------------------------------------
 
+[float]
+==== Query Options
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Option |Description
+|`_name` |Optional name field to identify the filter
+
+|`coerce` |Set to `true` to normalize longitude and latitude values to a
+standard -180:180 / -90:90 coordinate system. (default is `false`).
+
+|`ignore_malformed` |Set to `true` to
+accept geo points with invalid latitude or longitude (default is `false`).
+
+|`type` |Set to one of `indexed` or `memory` to define whether this filter will
+be executed in memory or indexed. See <<geo-bbox-type,Type>> below for further details.
+Default is `memory`.
+|=======================================================================
+
 [float]
 ==== Accepted Formats
 
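A sketch of the options documented in this hunk in context (index, field names, and coordinates are hypothetical); since filters and queries are merged in 2.0, the clause can be used directly as a query:

[source,sh]
----------------------------------------------------------------
# geo_bounding_box with _name and coerce (values are illustrative).
curl -XGET 'http://localhost:9200/my_index/_search' -d '{
  "query": {
    "geo_bounding_box": {
      "_name": "ny_area",
      "coerce": true,
      "pin.location": {
        "top_left":     { "lat": 40.73, "lon": -74.1 },
        "bottom_right": { "lat": 40.01, "lon": -71.12 }
      }
    }
  }
}'
----------------------------------------------------------------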
|
@ -195,6 +214,7 @@ a single location / point matches the filter, the document will be
|
||||||
included in the filter
|
included in the filter
|
||||||
|
|
||||||
[float]
|
[float]
|
||||||
|
[[geo-bbox-type]]
|
||||||
==== Type
|
==== Type
|
||||||
|
|
||||||
The type of the bounding box execution by default is set to `memory`,
|
The type of the bounding box execution by default is set to `memory`,
|
||||||
|
|
|
@@ -158,6 +158,19 @@ The following are options allowed on the filter:
 sure the `geo_point` type index lat lon in this case), or `none` which
 disables bounding box optimization.
 
+`_name`::
+
+Optional name field to identify the query
+
+`coerce`::
+
+Set to `true` to normalize longitude and latitude values to a standard -180:180 / -90:90
+coordinate system. (default is `false`).
+
+`ignore_malformed`::
+
+Set to `true` to accept geo points with invalid latitude or
+longitude (default is `false`).
+
 [float]
 ==== geo_point Type
 
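For illustration, the new `coerce` and `ignore_malformed` options might be combined with a `geo_distance` clause like this (field names and values are hypothetical):

[source,sh]
----------------------------------------------------------------
# Accept and normalize slightly out-of-range coordinates while matching
# documents within 12km of the given point.
curl -XGET 'http://localhost:9200/my_index/_search' -d '{
  "query": {
    "geo_distance": {
      "distance": "12km",
      "coerce": true,
      "ignore_malformed": true,
      "pin.location": {
        "lat": 40.73,
        "lon": -74.1
      }
    }
  }
}'
----------------------------------------------------------------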
@@ -24,7 +24,7 @@ Filters documents that exists within a range from a specific point:
 }
 --------------------------------------------------
 
-Supports the same point location parameter as the
+Supports the same point location parameter and query options as the
 <<query-dsl-geo-distance-query,geo_distance>>
 filter. And also support the common parameters for range (lt, lte, gt,
 gte, from, to, include_upper and include_lower).
@@ -26,6 +26,21 @@ points. Here is an example:
 }
 --------------------------------------------------
 
+[float]
+==== Query Options
+
+[cols="<,<",options="header",]
+|=======================================================================
+|Option |Description
+|`_name` |Optional name field to identify the filter
+
+|`coerce` |Set to `true` to normalize longitude and latitude values to a
+standard -180:180 / -90:90 coordinate system. (default is `false`).
+
+|`ignore_malformed` |Set to `true` to accept geo points with invalid latitude or
+longitude (default is `false`).
+|=======================================================================
+
 [float]
 ==== Allowed Formats
 
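A corresponding sketch for the `geo_polygon` options added here (the query name, field, and points are all hypothetical):

[source,sh]
----------------------------------------------------------------
# geo_polygon with _name and coerce; documents whose location falls inside
# the triangle are matched.
curl -XGET 'http://localhost:9200/my_index/_search' -d '{
  "query": {
    "geo_polygon": {
      "_name": "triangle",
      "coerce": true,
      "person.location": {
        "points": [
          { "lat": 40, "lon": -70 },
          { "lat": 30, "lon": -80 },
          { "lat": 20, "lon": -90 }
        ]
      }
    }
  }
}'
----------------------------------------------------------------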
@@ -68,7 +68,7 @@ function migratePlugin() {
     mkdir -p plugins/$1
     git mv -k * plugins/$1 > /dev/null 2>/dev/null
     git rm .gitignore > /dev/null 2>/dev/null
-    # echo "### change $1 groupId to org.elasticsearch.plugins"
+    # echo "### change $1 groupId to org.elasticsearch.plugin"
     # Change the groupId to avoid conflicts with existing 2.0.0 versions.
     replaceLine " <groupId>org.elasticsearch<\/groupId>" " <groupId>org.elasticsearch.plugin<\/groupId>" "plugins/$1/pom.xml"
 
@@ -6,12 +6,12 @@
 
     <parent>
         <groupId>org.elasticsearch.plugin</groupId>
-        <artifactId>elasticsearch-plugin</artifactId>
+        <artifactId>plugins</artifactId>
         <version>2.1.0-SNAPSHOT</version>
     </parent>
 
-    <artifactId>elasticsearch-analysis-icu</artifactId>
-    <name>Elasticsearch ICU Analysis plugin</name>
+    <artifactId>analysis-icu</artifactId>
+    <name>Plugin: Analysis: ICU</name>
     <description>The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.</description>
 
     <properties>