commit 85d0613ea6
Merge branch 'master' into index-lifecycle
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.4.0-snapshot-0a7c3f462f
+lucene = 7.4.0-snapshot-518d303506
 
 # optional dependencies
 spatial4j = 0.7
@@ -20,6 +20,7 @@
 package org.elasticsearch.client;
 
+import com.fasterxml.jackson.core.JsonParseException;
 
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
@@ -607,7 +608,7 @@ public class RestHighLevelClientTests extends ESTestCase {
 
     public void testProvidedNamedXContents() {
         List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
-        assertEquals(7, namedXContents.size());
+        assertEquals(8, namedXContents.size());
         Map<Class<?>, Integer> categories = new HashMap<>();
         List<String> names = new ArrayList<>();
         for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
@@ -625,9 +626,10 @@ public class RestHighLevelClientTests extends ESTestCase {
         assertTrue(names.contains(PrecisionAtK.NAME));
         assertTrue(names.contains(DiscountedCumulativeGain.NAME));
         assertTrue(names.contains(MeanReciprocalRank.NAME));
-        assertEquals(Integer.valueOf(2), categories.get(MetricDetail.class));
+        assertEquals(Integer.valueOf(3), categories.get(MetricDetail.class));
         assertTrue(names.contains(PrecisionAtK.NAME));
+        assertTrue(names.contains(MeanReciprocalRank.NAME));
         assertTrue(names.contains(DiscountedCumulativeGain.NAME));
     }
 
     private static class TrackingActionListener implements ActionListener<Integer> {
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A {@link NodeSelector} that selects nodes that have a particular value
+ * for an attribute.
+ */
+public final class HasAttributeNodeSelector implements NodeSelector {
+    private final String key;
+    private final String value;
+
+    public HasAttributeNodeSelector(String key, String value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    @Override
+    public void select(Iterable<Node> nodes) {
+        Iterator<Node> itr = nodes.iterator();
+        while (itr.hasNext()) {
+            Map<String, List<String>> allAttributes = itr.next().getAttributes();
+            if (allAttributes == null) continue;
+            List<String> values = allAttributes.get(key);
+            if (values == null || false == values.contains(value)) {
+                itr.remove();
+            }
+        }
+    }
+
+    @Override
+    public String toString() {
+        return key + "=" + value;
+    }
+}
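Note: in practice the selector is attached to a request through its options. The
sketch below mirrors the rest-client-options-customize-attribute documentation
snippet that appears later in this diff (COMMON_OPTIONS, "rack", and "c12" are
taken from that snippet, not from this file):

    RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
    options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12"));
    request.setOptions(options);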
@@ -19,6 +19,8 @@
 package org.elasticsearch.client;
 
+import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
@@ -52,13 +54,18 @@ public class Node {
      * if we don't know what roles the node has.
      */
     private final Roles roles;
+    /**
+     * Attributes declared on the node.
+     */
+    private final Map<String, List<String>> attributes;
 
     /**
      * Create a {@linkplain Node} with metadata. All parameters except
     * {@code host} are nullable and implementations of {@link NodeSelector}
     * need to decide what to do in their absence.
      */
-    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
+    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version,
+            Roles roles, Map<String, List<String>> attributes) {
         if (host == null) {
             throw new IllegalArgumentException("host cannot be null");
         }
@@ -67,13 +74,14 @@ public class Node {
         this.name = name;
         this.version = version;
         this.roles = roles;
+        this.attributes = attributes;
     }
 
     /**
      * Create a {@linkplain Node} without any metadata.
      */
     public Node(HttpHost host) {
-        this(host, null, null, null, null);
+        this(host, null, null, null, null, null);
     }
 
     /**
@@ -115,6 +123,13 @@ public class Node {
         return roles;
     }
 
+    /**
+     * Attributes declared on the node.
+     */
+    public Map<String, List<String>> getAttributes() {
+        return attributes;
+    }
+
     @Override
     public String toString() {
         StringBuilder b = new StringBuilder();
@@ -131,6 +146,9 @@ public class Node {
         if (roles != null) {
             b.append(", roles=").append(roles);
         }
+        if (attributes != null) {
+            b.append(", attributes=").append(attributes);
+        }
         return b.append(']').toString();
     }
 
@@ -144,12 +162,13 @@ public class Node {
             && Objects.equals(boundHosts, other.boundHosts)
             && Objects.equals(name, other.name)
             && Objects.equals(version, other.version)
-            && Objects.equals(roles, other.roles);
+            && Objects.equals(roles, other.roles)
+            && Objects.equals(attributes, other.attributes);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(host, boundHosts, name, version, roles);
+        return Objects.hash(host, boundHosts, name, version, roles, attributes);
     }
 
     /**
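Note: taken together, the Node changes above add a nullable attributes map
alongside the existing metadata, kept consistent across toString, equals, and
hashCode. A minimal sketch of building a node that carries attributes, assuming
only types appearing in this diff (the host and attribute values here are
illustrative, not from the commit):

    Map<String, List<String>> attributes = singletonMap("rack", singletonList("c12"));
    Node node = new Node(new HttpHost("localhost", 9200), null, null, null,
            new Node.Roles(true, true, true), attributes);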
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node.Roles;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static org.junit.Assert.assertEquals;
+
+public class HasAttributeNodeSelectorTests extends RestClientTestCase {
+    public void testHasAttribute() {
+        Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val")));
+        Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval")));
+        Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval")));
+        Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val")));
+        List<Node> nodes = new ArrayList<>();
+        nodes.add(hasAttributeValue);
+        nodes.add(hasAttributeButNotValue);
+        nodes.add(hasAttributeValueInList);
+        nodes.add(notHasAttribute);
+        List<Node> expected = new ArrayList<>();
+        expected.add(hasAttributeValue);
+        expected.add(hasAttributeValueInList);
+        new HasAttributeNodeSelector("attr", "val").select(nodes);
+        assertEquals(expected, nodes);
+    }
+
+    private static Node dummyNode(Map<String, List<String>> attributes) {
+        return new Node(new HttpHost("dummy"), Collections.<HttpHost>emptySet(),
+                randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
+                new Roles(randomBoolean(), randomBoolean(), randomBoolean()),
+                attributes);
+    }
+}
@@ -63,9 +63,10 @@ public class NodeSelectorTests extends RestClientTestCase {
         assertEquals(expected, nodes);
     }
 
-    private Node dummyNode(boolean master, boolean data, boolean ingest) {
+    private static Node dummyNode(boolean master, boolean data, boolean ingest) {
         return new Node(new HttpHost("dummy"), Collections.<HttpHost>emptySet(),
                 randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
-                new Roles(master, data, ingest));
+                new Roles(master, data, ingest),
+                Collections.<String, List<String>>emptyMap());
     }
 }
@@ -23,49 +23,67 @@ import org.apache.http.HttpHost;
 import org.elasticsearch.client.Node.Roles;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 
 import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class NodeTests extends RestClientTestCase {
     public void testToString() {
+        Map<String, List<String>> attributes = new HashMap<>();
+        attributes.put("foo", singletonList("bar"));
+        attributes.put("baz", Arrays.asList("bort", "zoom"));
         assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString());
+        assertEquals("[host=http://1, attributes={foo=[bar], baz=[bort, zoom]}]",
+                new Node(new HttpHost("1"), null, null, null, null, attributes).toString());
         assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"),
-                null, null, null, new Roles(true, true, true)).toString());
+                null, null, null, new Roles(true, true, true), null).toString());
         assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"),
-                null, null, "ver", null).toString());
+                null, null, "ver", null, null).toString());
         assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"),
-                null, "nam", null, null).toString());
+                null, "nam", null, null, null).toString());
         assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"),
-                new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString());
-        assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]",
+                new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null, null).toString());
+        assertEquals(
+                "[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m, attributes={foo=[bar], baz=[bort, zoom]}]",
                 new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))),
-                        "nam", "ver", new Roles(true, false, false)).toString());
+                        "nam", "ver", new Roles(true, false, false), attributes).toString());
+
     }
 
     public void testEqualsAndHashCode() {
         HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5));
         Node node = new Node(host,
-                randomBoolean() ? null : singleton(host),
-                randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
-                randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
-                randomBoolean() ? null : new Roles(true, true, true));
+                randomBoolean() ? null : singleton(host),
+                randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
+                randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
+                randomBoolean() ? null : new Roles(true, true, true),
+                randomBoolean() ? null : singletonMap("foo", singletonList("bar")));
         assertFalse(node.equals(null));
         assertTrue(node.equals(node));
         assertEquals(node.hashCode(), node.hashCode());
-        Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles());
+        Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(),
+                node.getRoles(), node.getAttributes());
         assertTrue(node.equals(copy));
         assertEquals(node.hashCode(), copy.hashCode());
         assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(),
-                node.getName(), node.getVersion(), node.getRoles())));
+                node.getName(), node.getVersion(), node.getRoles(), node.getAttributes())));
         assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))),
-                node.getName(), node.getVersion(), node.getRoles())));
-        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles())));
-        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles())));
-        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false))));
+                node.getName(), node.getVersion(), node.getRoles(), node.getAttributes())));
+        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed",
+                node.getVersion(), node.getRoles(), node.getAttributes())));
+        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(),
+                node.getVersion() + "changed", node.getRoles(), node.getAttributes())));
+        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(),
+                node.getVersion(), new Roles(false, false, false), node.getAttributes())));
+        assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(),
+                node.getVersion(), node.getRoles(), singletonMap("bort", singletonList("bing")))));
     }
 }
@@ -42,7 +42,9 @@ import java.util.concurrent.TimeUnit;
 import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
 import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
 import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
+import static org.hamcrest.Matchers.startsWith;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -214,7 +216,8 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
                 restClient.performRequest(request);
                 fail("expected to fail to connect");
             } catch (ConnectException e) {
-                assertEquals("Connection refused", e.getMessage());
+                // This is different in windows and linux but this matches both.
+                assertThat(e.getMessage(), startsWith("Connection refused"));
             }
         } else {
             Response response = restClient.performRequest(request);
@@ -342,7 +342,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
         List<Node> newNodes = new ArrayList<>(nodes.size());
         for (int i = 0; i < nodes.size(); i++) {
             Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false);
-            newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles));
+            newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null));
         }
         restClient.setNodes(newNodes);
         int rounds = between(1, 10);
@@ -341,9 +341,9 @@ public class RestClientTests extends RestClientTestCase {
     }
 
     public void testSelectHosts() throws IOException {
-        Node n1 = new Node(new HttpHost("1"), null, null, "1", null);
-        Node n2 = new Node(new HttpHost("2"), null, null, "2", null);
-        Node n3 = new Node(new HttpHost("3"), null, null, "3", null);
+        Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null);
+        Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null);
+        Node n3 = new Node(new HttpHost("3"), null, null, "3", null, null);
 
         NodeSelector not1 = new NodeSelector() {
             @Override
@@ -36,6 +36,7 @@ import org.apache.http.nio.entity.NStringEntity;
 import org.apache.http.ssl.SSLContextBuilder;
 import org.apache.http.ssl.SSLContexts;
 import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.HasAttributeNodeSelector;
 import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
 import org.elasticsearch.client.Node;
 import org.elasticsearch.client.NodeSelector;
@@ -190,11 +191,20 @@ public class RestClientDocumentation {
             //tag::rest-client-options-set-singleton
             request.setOptions(COMMON_OPTIONS);
             //end::rest-client-options-set-singleton
-            //tag::rest-client-options-customize
-            RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
-            options.addHeader("cats", "knock things off of other things");
-            request.setOptions(options);
-            //end::rest-client-options-customize
+            {
+                //tag::rest-client-options-customize-header
+                RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
+                options.addHeader("cats", "knock things off of other things");
+                request.setOptions(options);
+                //end::rest-client-options-customize-header
+            }
+            {
+                //tag::rest-client-options-customize-attribute
+                RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
+                options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12")); // <1>
+                request.setOptions(options);
+                //end::rest-client-options-customize-attribute
+            }
         }
         {
             HttpEntity[] documents = new HttpEntity[10];
@@ -36,12 +36,18 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static java.util.Collections.singletonList;
 import static java.util.Collections.unmodifiableList;
+import static java.util.Collections.unmodifiableMap;
 
 /**
  * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back.
  * Compatible with elasticsearch 2.x+.
@@ -138,16 +144,19 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer {
         Set<HttpHost> boundHosts = new HashSet<>();
         String name = null;
         String version = null;
-        String fieldName = null;
-        // Used to read roles from 5.0+
+        /*
+         * Multi-valued attributes come with key = `real_key.index` and we
+         * unflip them after reading them because we can't rely on the order
+         * that they arrive.
+         */
+        final Map<String, String> protoAttributes = new HashMap<String, String>();
+
         boolean sawRoles = false;
         boolean master = false;
         boolean data = false;
         boolean ingest = false;
-        // Used to read roles from 2.x
-        Boolean masterAttribute = null;
-        Boolean dataAttribute = null;
-        boolean clientAttribute = false;
+
+        String fieldName = null;
         while (parser.nextToken() != JsonToken.END_OBJECT) {
             if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
                 fieldName = parser.getCurrentName();
@@ -170,13 +179,12 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer {
             }
         } else if ("attributes".equals(fieldName)) {
             while (parser.nextToken() != JsonToken.END_OBJECT) {
-                if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) {
-                    masterAttribute = toBoolean(parser.getValueAsString());
-                } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) {
-                    dataAttribute = toBoolean(parser.getValueAsString());
-                } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) {
-                    clientAttribute = toBoolean(parser.getValueAsString());
-                } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+                if (parser.getCurrentToken() == JsonToken.VALUE_STRING) {
+                    String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString());
+                    if (oldValue != null) {
+                        throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]");
+                    }
+                } else {
                     parser.skipChildren();
                 }
             }
@@ -216,21 +224,74 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer {
         if (publishedHost == null) {
             logger.debug("skipping node [" + nodeId + "] with http disabled");
             return null;
-        } else {
-            logger.trace("adding node [" + nodeId + "]");
-            if (version.startsWith("2.")) {
-                /*
-                 * 2.x doesn't send roles, instead we try to read them from
-                 * attributes.
-                 */
-                master = masterAttribute == null ? false == clientAttribute : masterAttribute;
-                data = dataAttribute == null ? false == clientAttribute : dataAttribute;
-            } else {
-                assert sawRoles : "didn't see roles for [" + nodeId + "]";
-            }
+        }
+
+        Map<String, List<String>> realAttributes = new HashMap<>(protoAttributes.size());
+        List<String> keys = new ArrayList<>(protoAttributes.keySet());
+        for (String key : keys) {
+            if (key.endsWith(".0")) {
+                String realKey = key.substring(0, key.length() - 2);
+                List<String> values = new ArrayList<>();
+                int i = 0;
+                while (true) {
+                    String value = protoAttributes.remove(realKey + "." + i);
+                    if (value == null) {
+                        break;
+                    }
+                    values.add(value);
+                    i++;
+                }
+                realAttributes.put(realKey, unmodifiableList(values));
+            }
+        }
-            assert boundHosts.contains(publishedHost) :
-                    "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
-            return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest));
-        }
+        for (Map.Entry<String, String> entry : protoAttributes.entrySet()) {
+            realAttributes.put(entry.getKey(), singletonList(entry.getValue()));
+        }
+
+        if (version.startsWith("2.")) {
+            /*
+             * 2.x doesn't send roles, instead we try to read them from
+             * attributes.
+             */
+            boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false);
+            Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null);
+            Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null);
+            master = masterAttribute == null ? false == clientAttribute : masterAttribute;
+            data = dataAttribute == null ? false == clientAttribute : dataAttribute;
+        } else {
+            assert sawRoles : "didn't see roles for [" + nodeId + "]";
+        }
+        assert boundHosts.contains(publishedHost) :
+                "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
+        logger.trace("adding node [" + nodeId + "]");
+        return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest),
+                unmodifiableMap(realAttributes));
+    }
+
+    /**
+     * Returns {@code defaultValue} if the attribute didn't come back,
+     * {@code true} or {@code false} if it did come back as
+     * either of those, or throws an IOException if the attribute
+     * came back in a strange way.
+     */
+    private static Boolean v2RoleAttributeValue(Map<String, List<String>> attributes,
+            String name, Boolean defaultValue) throws IOException {
+        List<String> valueList = attributes.remove(name);
+        if (valueList == null) {
+            return defaultValue;
+        }
+        if (valueList.size() != 1) {
+            throw new IOException("expected only a single attribute value for [" + name + "] but got "
+                    + valueList);
+        }
+        switch (valueList.get(0)) {
+        case "true":
+            return true;
+        case "false":
+            return false;
+        default:
+            throw new IOException("expected [" + name + "] to be either [true] or [false] but was ["
+                    + valueList.get(0) + "]");
+        }
+    }
 
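Note: a worked example of the attribute "unflipping" above, with data taken from
the test fixtures later in this diff. The nodes info API flattens a multi-valued
attribute into numbered keys; the loop rebuilds the list, and any remaining
single-valued attribute becomes a one-element list:

    protoAttributes: {"array.0": "m", "array.1": "1", "dummy": "everyone_has_me"}
    realAttributes:  {"array": ["m", "1"], "dummy": ["everyone_has_me"]}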
@@ -248,15 +309,4 @@ public final class ElasticsearchNodesSniffer implements NodesSniffer {
             return name;
         }
     }
-
-    private static boolean toBoolean(String string) {
-        switch (string) {
-        case "true":
-            return true;
-        case "false":
-            return false;
-        default:
-            throw new IllegalArgumentException("[" + string + "] is not a valid boolean");
-        }
-    }
 }
@@ -30,14 +30,18 @@ import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import com.fasterxml.jackson.core.JsonFactory;
 
-import static org.hamcrest.Matchers.hasItem;
+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.hasSize;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 /**
@@ -53,10 +57,14 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase {
         try {
             HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);
             List<Node> nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory());
-            // Use these assertions because the error messages are nicer than hasItems.
+            /*
+             * Use these assertions because the error messages are nicer
+             * than hasItems and we know the results are in order because
+             * that is how we generated the file.
+             */
             assertThat(nodes, hasSize(expected.length));
-            for (Node expectedNode : expected) {
-                assertThat(nodes, hasItem(expectedNode));
+            for (int i = 0; i < expected.length; i++) {
+                assertEquals(expected[i], nodes.get(i));
             }
         } finally {
             in.close();
@@ -66,13 +74,13 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase {
     public void test2x() throws IOException {
         checkFile("2.0.0_nodes_http.json",
                 node(9200, "m1", "2.0.0", true, false, false),
-                node(9202, "m2", "2.0.0", true, true, false),
-                node(9201, "m3", "2.0.0", true, false, false),
-                node(9205, "d1", "2.0.0", false, true, false),
+                node(9201, "m2", "2.0.0", true, true, false),
+                node(9202, "m3", "2.0.0", true, false, false),
+                node(9203, "d1", "2.0.0", false, true, false),
                 node(9204, "d2", "2.0.0", false, true, false),
-                node(9203, "d3", "2.0.0", false, true, false),
-                node(9207, "c1", "2.0.0", false, false, false),
-                node(9206, "c2", "2.0.0", false, false, false));
+                node(9205, "d3", "2.0.0", false, true, false),
+                node(9206, "c1", "2.0.0", false, false, false),
+                node(9207, "c2", "2.0.0", false, false, false));
     }
 
     public void test5x() throws IOException {
@@ -104,6 +112,10 @@ public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase {
         Set<HttpHost> boundHosts = new HashSet<>(2);
         boundHosts.add(host);
         boundHosts.add(new HttpHost("[::1]", port));
-        return new Node(host, boundHosts, name, version, new Roles(master, data, ingest));
+        Map<String, List<String>> attributes = new HashMap<>();
+        attributes.put("dummy", singletonList("everyone_has_me"));
+        attributes.put("number", singletonList(name.substring(1)));
+        attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1)));
+        return new Node(host, boundHosts, name, version, new Roles(master, data, ingest), attributes);
     }
 }
|
@ -200,9 +200,21 @@ public class ElasticsearchNodesSnifferTests extends RestClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
int numAttributes = between(0, 5);
|
||||
Map<String, List<String>> attributes = new HashMap<>(numAttributes);
|
||||
for (int j = 0; j < numAttributes; j++) {
|
||||
int numValues = frequently() ? 1 : between(2, 5);
|
||||
List<String> values = new ArrayList<>();
|
||||
for (int v = 0; v < numValues; v++) {
|
||||
values.add(j + "value" + v);
|
||||
}
|
||||
attributes.put("attr" + j, values);
|
||||
}
|
||||
|
||||
Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5),
|
||||
randomAsciiAlphanumOfLength(5),
|
||||
new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()));
|
||||
new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()),
|
||||
attributes);
|
||||
|
||||
generator.writeObjectFieldStart(nodeId);
|
||||
if (getRandom().nextBoolean()) {
|
||||
|
@@ -256,18 +268,17 @@ public class ElasticsearchNodesSnifferTests extends RestClientTestCase {
             generator.writeFieldName("name");
             generator.writeString(node.getName());
         }
-        int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
-        Map<String, String> attributes = new HashMap<>(numAttributes);
-        for (int j = 0; j < numAttributes; j++) {
-            attributes.put("attr" + j, "value" + j);
-        }
         if (numAttributes > 0) {
             generator.writeObjectFieldStart("attributes");
-        }
-        for (Map.Entry<String, String> entry : attributes.entrySet()) {
-            generator.writeStringField(entry.getKey(), entry.getValue());
-        }
-        if (numAttributes > 0) {
+            for (Map.Entry<String, List<String>> entry : attributes.entrySet()) {
+                if (entry.getValue().size() == 1) {
+                    generator.writeStringField(entry.getKey(), entry.getValue().get(0));
+                } else {
+                    for (int v = 0; v < entry.getValue().size(); v++) {
+                        generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v));
+                    }
+                }
+            }
             generator.writeEndObject();
         }
         generator.writeEndObject();
@@ -1,140 +1,200 @@
 {
-  "cluster_name" : "elasticsearch",
-  "nodes" : {
-    "qYUZ_8bTRwODPxukDlFw6Q" : {
-      "name" : "d2",
-      "transport_address" : "127.0.0.1:9304",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9204",
-      "attributes" : {
-        "master" : "false"
+  "cluster_name": "elasticsearch",
+  "nodes": {
+    "qr-SOrELSaGW8SlU8nflBw": {
+      "name": "m1",
+      "transport_address": "127.0.0.1:9300",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9200",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "m",
+        "data": "false",
+        "array.1": "1",
+        "master": "true"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ],
-        "publish_address" : "127.0.0.1:9204",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9200",
+          "[::1]:9200"
+        ],
+        "publish_address": "127.0.0.1:9200",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "Yej5UVNgR2KgBjUFHOQpCw" : {
-      "name" : "c1",
-      "transport_address" : "127.0.0.1:9307",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9207",
-      "attributes" : {
-        "data" : "false",
-        "master" : "false"
+    "osfiXxUOQzCVIs-eepgSCA": {
+      "name": "m2",
+      "transport_address": "127.0.0.1:9301",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9201",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "m",
+        "array.1": "2",
+        "master": "true"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ],
-        "publish_address" : "127.0.0.1:9207",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9201",
+          "[::1]:9201"
+        ],
+        "publish_address": "127.0.0.1:9201",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "mHttJwhwReangKEx9EGuAg" : {
-      "name" : "m3",
-      "transport_address" : "127.0.0.1:9301",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9201",
-      "attributes" : {
-        "data" : "false",
-        "master" : "true"
+    "lazeJFiIQ8eHHV4GeIdMPg": {
+      "name": "m3",
+      "transport_address": "127.0.0.1:9302",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9202",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "m",
+        "data": "false",
+        "array.1": "3",
+        "master": "true"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ],
-        "publish_address" : "127.0.0.1:9201",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9202",
+          "[::1]:9202"
+        ],
+        "publish_address": "127.0.0.1:9202",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "6Erdptt_QRGLxMiLi9mTkg" : {
-      "name" : "c2",
-      "transport_address" : "127.0.0.1:9306",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9206",
-      "attributes" : {
-        "data" : "false",
-        "client" : "true"
+    "t9WxK-fNRsqV5G0Mm09KpQ": {
+      "name": "d1",
+      "transport_address": "127.0.0.1:9303",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9203",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "d",
+        "array.1": "1",
+        "master": "false"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ],
-        "publish_address" : "127.0.0.1:9206",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9203",
+          "[::1]:9203"
+        ],
+        "publish_address": "127.0.0.1:9203",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "mLRCZBypTiys6e8KY5DMnA" : {
-      "name" : "m1",
-      "transport_address" : "127.0.0.1:9300",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9200",
-      "attributes" : {
-        "data" : "false"
+    "wgoDzluvTViwUjEsmVesKw": {
+      "name": "d2",
+      "transport_address": "127.0.0.1:9304",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9204",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "d",
+        "array.1": "2",
+        "master": "false"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ],
-        "publish_address" : "127.0.0.1:9200",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9204",
+          "[::1]:9204"
+        ],
+        "publish_address": "127.0.0.1:9204",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "pVqOhytXQwetsZVzCBppYw" : {
-      "name" : "m2",
-      "transport_address" : "127.0.0.1:9302",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9202",
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ],
-        "publish_address" : "127.0.0.1:9202",
-        "max_content_length_in_bytes" : 104857600
+    "6j_t3pPhSm-oRTyypTzu5g": {
+      "name": "d3",
+      "transport_address": "127.0.0.1:9305",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9205",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "d",
+        "array.1": "3",
+        "master": "false"
+      },
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9205",
+          "[::1]:9205"
+        ],
+        "publish_address": "127.0.0.1:9205",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "ARyzVfpJSw2a9TOIUpbsBA" : {
-      "name" : "d1",
-      "transport_address" : "127.0.0.1:9305",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9205",
-      "attributes" : {
-        "master" : "false"
+    "PaEkm0z7Ssiuyfkh3aASag": {
+      "name": "c1",
+      "transport_address": "127.0.0.1:9306",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9206",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "c",
+        "data": "false",
+        "array.1": "1",
+        "master": "false"
      },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ],
-        "publish_address" : "127.0.0.1:9205",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9206",
+          "[::1]:9206"
+        ],
+        "publish_address": "127.0.0.1:9206",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "2Hpid-g5Sc2BKCevhN6VQw" : {
-      "name" : "d3",
-      "transport_address" : "127.0.0.1:9303",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "2.0.0",
-      "build" : "de54438",
-      "http_address" : "127.0.0.1:9203",
-      "attributes" : {
-        "master" : "false"
+    "LAFKr2K_QmupqnM_atJqkQ": {
+      "name": "c2",
+      "transport_address": "127.0.0.1:9307",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "2.0.0",
+      "build": "de54438",
+      "http_address": "127.0.0.1:9207",
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "c",
+        "data": "false",
+        "array.1": "2",
+        "master": "false"
       },
-      "http" : {
-        "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ],
-        "publish_address" : "127.0.0.1:9203",
-        "max_content_length_in_bytes" : 104857600
+      "http": {
+        "bound_address": [
+          "127.0.0.1:9207",
+          "[::1]:9207"
+        ],
+        "publish_address": "127.0.0.1:9207",
+        "max_content_length_in_bytes": 104857600
       }
     }
   }
 }
@@ -1,168 +1,216 @@
 {
-  "_nodes" : {
-    "total" : 8,
-    "successful" : 8,
-    "failed" : 0
+  "_nodes": {
+    "total": 8,
+    "successful": 8,
+    "failed": 0
   },
-  "cluster_name" : "test",
-  "nodes" : {
-    "DXz_rhcdSF2xJ96qyjaLVw" : {
-      "name" : "m1",
-      "transport_address" : "127.0.0.1:9300",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
+  "cluster_name": "elasticsearch",
+  "nodes": {
+    "0S4r3NurTYSFSb8R9SxwWA": {
+      "name": "m1",
+      "transport_address": "127.0.0.1:9300",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
         "master",
         "ingest"
       ],
-      "http" : {
-        "bound_address" : [
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "m",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
           "[::1]:9200",
           "127.0.0.1:9200"
         ],
-        "publish_address" : "127.0.0.1:9200",
-        "max_content_length_in_bytes" : 104857600
+        "publish_address": "127.0.0.1:9200",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "53Mi6jYdRgeR1cdyuoNfQQ" : {
-      "name" : "m2",
-      "transport_address" : "127.0.0.1:9301",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
+    "k_CBrMXARkS57Qb5-3Mw5g": {
+      "name": "m2",
+      "transport_address": "127.0.0.1:9301",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
         "master",
         "data",
         "ingest"
       ],
-      "http" : {
-        "bound_address" : [
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "m",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
           "[::1]:9201",
           "127.0.0.1:9201"
         ],
-        "publish_address" : "127.0.0.1:9201",
-        "max_content_length_in_bytes" : 104857600
+        "publish_address": "127.0.0.1:9201",
+        "max_content_length_in_bytes": 104857600
       }
     },
-    "XBIghcHiRlWP9c4vY6rETw" : {
-      "name" : "c2",
-      "transport_address" : "127.0.0.1:9307",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9207",
-          "127.0.0.1:9207"
-        ],
-        "publish_address" : "127.0.0.1:9207",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "cFM30FlyS8K1njH_bovwwQ" : {
-      "name" : "d1",
-      "transport_address" : "127.0.0.1:9303",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9203",
-          "127.0.0.1:9203"
-        ],
-        "publish_address" : "127.0.0.1:9203",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "eoVUVRGNRDyyOapqIcrsIA" : {
-      "name" : "d2",
-      "transport_address" : "127.0.0.1:9304",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9204",
-          "127.0.0.1:9204"
-        ],
-        "publish_address" : "127.0.0.1:9204",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "xPN76uDcTP-DyXaRzPg2NQ" : {
-      "name" : "c1",
-      "transport_address" : "127.0.0.1:9306",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9206",
-          "127.0.0.1:9206"
-        ],
-        "publish_address" : "127.0.0.1:9206",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "RY0oW2d7TISEqazk-U4Kcw" : {
-      "name" : "d3",
-      "transport_address" : "127.0.0.1:9305",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9205",
-          "127.0.0.1:9205"
-        ],
-        "publish_address" : "127.0.0.1:9205",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "tU0rXEZmQ9GsWfn2TQ4kow" : {
-      "name" : "m3",
-      "transport_address" : "127.0.0.1:9302",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "5.0.0",
-      "build_hash" : "253032b",
-      "roles" : [
+    "6eynRPQ1RleJTeGDuTR9mw": {
+      "name": "m3",
+      "transport_address": "127.0.0.1:9302",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
         "master",
         "ingest"
       ],
-      "http" : {
-        "bound_address" : [
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "m",
+        "array.1": "3"
+      },
+      "http": {
+        "bound_address": [
           "[::1]:9202",
           "127.0.0.1:9202"
         ],
-        "publish_address" : "127.0.0.1:9202",
-        "max_content_length_in_bytes" : 104857600
+        "publish_address": "127.0.0.1:9202",
+        "max_content_length_in_bytes": 104857600
       }
     },
+    "cbGC-ay1QNWaESvEh5513w": {
+      "name": "d1",
+      "transport_address": "127.0.0.1:9303",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "d",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9203",
+          "127.0.0.1:9203"
+        ],
+        "publish_address": "127.0.0.1:9203",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "LexndPpXR2ytYsU5fTElnQ": {
+      "name": "d2",
+      "transport_address": "127.0.0.1:9304",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "d",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9204",
+          "127.0.0.1:9204"
+        ],
+        "publish_address": "127.0.0.1:9204",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "SbNG1DKYSBu20zfOz2gDZQ": {
+      "name": "d3",
+      "transport_address": "127.0.0.1:9305",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "d",
+        "array.1": "3"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9205",
+          "127.0.0.1:9205"
+        ],
+        "publish_address": "127.0.0.1:9205",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "fM4H-m2WTDWmsGsL7jIJew": {
+      "name": "c1",
+      "transport_address": "127.0.0.1:9306",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "c",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9206",
+          "127.0.0.1:9206"
+        ],
+        "publish_address": "127.0.0.1:9206",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "pFoh7d0BTbqqI3HKd9na5A": {
+      "name": "c2",
+      "transport_address": "127.0.0.1:9307",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "5.0.0",
+      "build_hash": "253032b",
+      "roles": [
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "c",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9207",
+          "127.0.0.1:9207"
+        ],
+        "publish_address": "127.0.0.1:9207",
+        "max_content_length_in_bytes": 104857600
+      }
+    }
   }
 }
@@ -1,168 +1,216 @@
 {
-  "_nodes" : {
-    "total" : 8,
-    "successful" : 8,
-    "failed" : 0
+  "_nodes": {
+    "total": 8,
+    "successful": 8,
+    "failed": 0
   },
-  "cluster_name" : "test",
-  "nodes" : {
-    "FX9npqGQSL2mOGF8Zkf3hw" : {
-      "name" : "m2",
-      "transport_address" : "127.0.0.1:9301",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "master",
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9201",
-          "127.0.0.1:9201"
-        ],
-        "publish_address" : "127.0.0.1:9201",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "jmUqzYLGTbWCg127kve3Tg" : {
-      "name" : "d1",
-      "transport_address" : "127.0.0.1:9303",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9203",
-          "127.0.0.1:9203"
-        ],
-        "publish_address" : "127.0.0.1:9203",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "soBU6bzvTOqdLxPstSbJ2g" : {
-      "name" : "d3",
-      "transport_address" : "127.0.0.1:9305",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9205",
-          "127.0.0.1:9205"
-        ],
-        "publish_address" : "127.0.0.1:9205",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "mtYDAhURTP6twdmNAkMnOg" : {
-      "name" : "m3",
-      "transport_address" : "127.0.0.1:9302",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
+  "cluster_name": "elasticsearch",
+  "nodes": {
+    "ikXK_skVTfWkhONhldnbkw": {
+      "name": "m1",
+      "transport_address": "127.0.0.1:9300",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
         "master",
         "ingest"
       ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9202",
-          "127.0.0.1:9202"
-        ],
-        "publish_address" : "127.0.0.1:9202",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "URxHiUQPROOt1G22Ev6lXw" : {
-      "name" : "c2",
-      "transport_address" : "127.0.0.1:9307",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9207",
-          "127.0.0.1:9207"
-        ],
-        "publish_address" : "127.0.0.1:9207",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "_06S_kWoRqqFR8Z8CS3JRw" : {
-      "name" : "c1",
-      "transport_address" : "127.0.0.1:9306",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9206",
-          "127.0.0.1:9206"
-        ],
-        "publish_address" : "127.0.0.1:9206",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "QZE5Bd6DQJmnfVs2dglOvA" : {
-      "name" : "d2",
-      "transport_address" : "127.0.0.1:9304",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "data",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
-          "[::1]:9204",
-          "127.0.0.1:9204"
-        ],
-        "publish_address" : "127.0.0.1:9204",
-        "max_content_length_in_bytes" : 104857600
-      }
-    },
-    "_3mTXg6dSweZn5ReB2fQqw" : {
-      "name" : "m1",
-      "transport_address" : "127.0.0.1:9300",
-      "host" : "127.0.0.1",
-      "ip" : "127.0.0.1",
-      "version" : "6.0.0",
-      "build_hash" : "8f0685b",
-      "roles" : [
-        "master",
-        "ingest"
-      ],
-      "http" : {
-        "bound_address" : [
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "m",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
           "[::1]:9200",
           "127.0.0.1:9200"
         ],
-        "publish_address" : "127.0.0.1:9200",
-        "max_content_length_in_bytes" : 104857600
+        "publish_address": "127.0.0.1:9200",
+        "max_content_length_in_bytes": 104857600
       }
     },
+    "TMHa34w4RqeuYoHCfJGXZg": {
+      "name": "m2",
+      "transport_address": "127.0.0.1:9301",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "master",
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "m",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9201",
+          "127.0.0.1:9201"
+        ],
+        "publish_address": "127.0.0.1:9201",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "lzaMRJTVT166sgVZdQ5thA": {
+      "name": "m3",
+      "transport_address": "127.0.0.1:9302",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "master",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "m",
+        "array.1": "3"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9202",
+          "127.0.0.1:9202"
+        ],
+        "publish_address": "127.0.0.1:9202",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "tGP5sUecSd6BLTWk1NWF8Q": {
+      "name": "d1",
+      "transport_address": "127.0.0.1:9303",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "d",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9203",
+          "127.0.0.1:9203"
+        ],
+        "publish_address": "127.0.0.1:9203",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "c1UgW5ROTkSa2YnM_T56tw": {
+      "name": "d2",
+      "transport_address": "127.0.0.1:9304",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "d",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9204",
+          "127.0.0.1:9204"
+        ],
+        "publish_address": "127.0.0.1:9204",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "QM9yjqjmS72MstpNYV_trg": {
+      "name": "d3",
+      "transport_address": "127.0.0.1:9305",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "data",
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "3",
+        "array.0": "d",
+        "array.1": "3"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9205",
+          "127.0.0.1:9205"
+        ],
+        "publish_address": "127.0.0.1:9205",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "wLtzAssoQYeX_4TstgCj0Q": {
+      "name": "c1",
+      "transport_address": "127.0.0.1:9306",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "1",
+        "array.0": "c",
+        "array.1": "1"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9206",
+          "127.0.0.1:9206"
+        ],
+        "publish_address": "127.0.0.1:9206",
+        "max_content_length_in_bytes": 104857600
+      }
+    },
+    "ONOzpst8TH-ZebG7fxGwaA": {
+      "name": "c2",
+      "transport_address": "127.0.0.1:9307",
+      "host": "127.0.0.1",
+      "ip": "127.0.0.1",
+      "version": "6.0.0",
+      "build_hash": "8f0685b",
+      "roles": [
+        "ingest"
+      ],
+      "attributes": {
+        "dummy": "everyone_has_me",
+        "number": "2",
+        "array.0": "c",
+        "array.1": "2"
+      },
+      "http": {
+        "bound_address": [
+          "[::1]:9207",
+          "127.0.0.1:9207"
+        ],
+        "publish_address": "127.0.0.1:9207",
+        "max_content_length_in_bytes": 104857600
+      }
+    }
   }
 }
@@ -0,0 +1,107 @@
#!/bin/bash

# Recreates the v_nodes_http.json files in this directory. This is
# meant to be an "every once in a while" thing that we do only when
# we want to add a new version of Elasticsearch or configure the
# nodes differently. That is why we don't do this in gradle. It also
# allows us to play fast and loose with error handling. If something
# goes wrong you have to manually clean up, which is good because it
# leaves around the kinds of things that we need to debug the failure.

# I built this file so the next time I have to regenerate these
# v_nodes_http.json files I won't have to reconfigure Elasticsearch
# from scratch. While I was at it I took the time to make sure that
# when we do rebuild the files they don't jump around too much. That
# way the diffs are smaller.

set -e

script_path="$( cd "$(dirname "$0")" ; pwd -P )"
work=$(mktemp -d)
pushd ${work} >> /dev/null
echo Working in ${work}

wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz
sha1sum -c - << __SHAs
e369d8579bd3a2e8b5344278d5043f19f14cac88  elasticsearch-2.0.0.tar.gz
d25f6547bccec9f0b5ea7583815f96a6f50849e0  elasticsearch-5.0.0.tar.gz
__SHAs
sha512sum -c - << __SHAs
25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c  elasticsearch-6.0.0.tar.gz
__SHAs


function do_version() {
    local version=$1
    local nodes='m1 m2 m3 d1 d2 d3 c1 c2'
    rm -rf ${version}
    mkdir -p ${version}
    pushd ${version} >> /dev/null

    tar xf ../elasticsearch-${version}.tar.gz
    local http_port=9200
    for node in ${nodes}; do
        mkdir ${node}
        cp -r elasticsearch-${version}/* ${node}
        local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false)
        local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false)
        # m2 is always master and data for these tests just so we have a node like that
        data=$([[ "$node" == 'm2' ]] && echo true || echo ${data})
        local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr')
        local transport_port=$((http_port+100))

        cat >> ${node}/config/elasticsearch.yml << __ES_YML
node.name:          ${node}
node.master:        ${master}
node.data:          ${data}
node${attr}.dummy:  everyone_has_me
node${attr}.number: ${node:1}
node${attr}.array:  [${node:0:1}, ${node:1}]
http.port:          ${http_port}
transport.tcp.port: ${transport_port}
discovery.zen.minimum_master_nodes: 3
discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302']
__ES_YML

        if [ ${version} != '2.0.0' ]; then
            perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options
        fi

        echo "starting ${version}/${node}..."
        ${node}/bin/elasticsearch -d -p ${node}/pidfile

        ((http_port++))
    done

    echo "waiting for cluster to form"
    # got to wait for all the nodes
    until curl -s localhost:9200; do
        sleep .25
    done

    echo "waiting for all nodes to join"
    until [ $(echo ${nodes} | wc -w) -eq $(curl -s localhost:9200/_cat/nodes | wc -l) ]; do
        sleep .25
    done

    # jq sorts the nodes by their http host so the file doesn't jump around when we regenerate it
    curl -s localhost:9200/_nodes/http?pretty \
        | jq '[to_entries[] | ( select(.key == "nodes").value|to_entries|sort_by(.value.http.publish_address)|from_entries|{"key": "nodes", "value": .} ) // .] | from_entries' \
        > ${script_path}/${version}_nodes_http.json

    for node in ${nodes}; do
        echo "stopping ${version}/${node}..."
        kill $(cat ${node}/pidfile)
    done

    popd >> /dev/null
}

JAVA_HOME=$JAVA8_HOME do_version 2.0.0
JAVA_HOME=$JAVA8_HOME do_version 5.0.0
JAVA_HOME=$JAVA8_HOME do_version 6.0.0

popd >> /dev/null
rm -rf ${work}
@@ -2,3 +2,5 @@
 few nodes in different configurations locally at various versions. They are
 for testing `ElasticsearchNodesSniffer` against different versions of
 Elasticsearch.
+
+See create_test_nodes_info.bash for how to create these.
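For reference, a minimal sketch of the consumer these fixture files exercise, assuming the sniffer module's `ElasticsearchNodesSniffer(RestClient, long, Scheme)` constructor and `sniff()` method; the host and port here are illustrative only:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;

import java.io.IOException;
import java.util.List;

public class SnifferSketch {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(client,
                    ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
                    ElasticsearchNodesSniffer.Scheme.HTTP);
            // sniff() fetches _nodes/http and parses the same payload captured
            // in the *_nodes_http.json files above, including node attributes.
            List<Node> nodes = sniffer.sniff();
            nodes.forEach(n -> System.out.println(n.getHost()));
        }
    }
}
--------------------------------------------------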
@ -122,7 +122,7 @@ case "$1" in
|
|||
ulimit -l $MAX_LOCKED_MEMORY
|
||||
fi
|
||||
|
||||
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
|
||||
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then
|
||||
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
|
||||
fi
|
||||
|
||||
|
|
|
@@ -90,7 +90,7 @@ start() {
     if [ -n "$MAX_LOCKED_MEMORY" ]; then
         ulimit -l $MAX_LOCKED_MEMORY
     fi
-    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
+    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then
         sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
     fi
 
@@ -312,9 +312,17 @@ adds an extra header:
 
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
-include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize]
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-header]
 --------------------------------------------------
+
+Or you can send requests to nodes with a particular attribute:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-attribute]
+--------------------------------------------------
+<1> Replace the node selector with one that selects nodes on a particular rack.
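The `rest-client-options-customize-attribute` tag itself is not shown in this diff. A minimal sketch of what it presumably contains, assuming the `RequestOptions` builder exposes `setNodeSelector` as the callout implies; the attribute key `rack` and value `one` are illustrative, not taken from the commit:

["source","java"]
--------------------------------------------------
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
// Route requests only to nodes whose nodes-info attributes contain rack=one;
// the attribute would come from a node.attr.rack setting on the server side.
options.setNodeSelector(new HasAttributeNodeSelector("rack", "one")); // <1>
--------------------------------------------------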

==== Multiple parallel asynchronous actions

@@ -284,22 +284,7 @@ You may further restrict the permissions by specifying a prefix within the bucket
 // NOTCONSOLE
 
 The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository
-registration will fail. If you want Elasticsearch to create the bucket instead, you can add the permission to create a
-specific bucket like this:
-
-[source,js]
-----
-{
-  "Action": [
-    "s3:CreateBucket"
-  ],
-  "Effect": "Allow",
-  "Resource": [
-    "arn:aws:s3:::snaps.example.com"
-  ]
-}
-----
-// NOTCONSOLE
+registration will fail.
 
 [[repository-s3-aws-vpc]]
 [float]
@@ -279,6 +279,13 @@ docker build --tag=elasticsearch-custom .
 docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom
 --------------------------------------------
 
+Some plugins require additional security permissions. You have to explicitly accept
+them either by attaching a `tty` when you run the Docker image and accepting "yes" at
+the prompts, or by inspecting the security permissions separately and, if you are
+comfortable with them, adding the `--batch` flag to the plugin install command.
+See {plugins}/_other_command_line_parameters.html[Plugin Management documentation]
+for more details.
+
 ===== D. Override the image's default https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options[CMD]
 
 Options can be passed as command-line options to the {es} process by
@@ -1 +0,0 @@
-bf2cfa0551ebdf08a2cf3079f3c74643bd9dbb76
@@ -0,0 +1 @@
+a57659a275921d8ab3f7ec580e9bf713ce6143b1
@@ -376,7 +376,8 @@ public final class Def {
                 ref.delegateClassName,
                 ref.delegateInvokeType,
                 ref.delegateMethodName,
-                ref.delegateMethodType
+                ref.delegateMethodType,
+                ref.isDelegateInterface ? 1 : 0
             );
             return callSite.dynamicInvoker().asType(MethodType.methodType(clazz.clazz, captures));
         }
@@ -20,6 +20,7 @@
 package org.elasticsearch.painless;
 
 import org.elasticsearch.painless.spi.Whitelist;
+import org.objectweb.asm.Opcodes;
 
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodHandles;
@@ -202,16 +203,28 @@ public final class Definition {
 
         public void write(MethodWriter writer) {
             final org.objectweb.asm.Type type;
+            final Class<?> clazz;
             if (augmentation != null) {
                 assert java.lang.reflect.Modifier.isStatic(modifiers);
+                clazz = augmentation;
                 type = org.objectweb.asm.Type.getType(augmentation);
             } else {
+                clazz = owner.clazz;
                 type = owner.type;
             }
 
             if (java.lang.reflect.Modifier.isStatic(modifiers)) {
-                writer.invokeStatic(type, method);
-            } else if (java.lang.reflect.Modifier.isInterface(owner.clazz.getModifiers())) {
+                // invokeStatic assumes that the owner class is not an interface, so this is a
+                // special case for interfaces where the interface method boolean needs to be set to
+                // true to reference the appropriate class constant when calling a static interface
+                // method since java 8 did not check, but java 9 and 10 do
+                if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) {
+                    writer.visitMethodInsn(Opcodes.INVOKESTATIC,
+                            type.getInternalName(), name, getMethodType().toMethodDescriptorString(), true);
+                } else {
+                    writer.invokeStatic(type, method);
+                }
+            } else if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) {
                 writer.invokeInterface(type, method);
             } else {
                 writer.invokeVirtual(type, method);
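The `true` passed to `visitMethodInsn` above is ASM's `isInterface` flag. A standalone sketch of the same rule, using plain ObjectWeb ASM outside of Painless; the visitor and method names here are hypothetical, shown only to illustrate the flag:

["source","java"]
--------------------------------------------------
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;

final class StaticInterfaceCallSketch {
    // Emits a call to Comparator.naturalOrder(), a static method declared on an
    // interface. The trailing 'true' marks the owner as an interface; on Java 9+
    // an INVOKESTATIC with the wrong flag fails at resolution time with an
    // IncompatibleClassChangeError, which is the failure this hunk fixes.
    static void emitNaturalOrderCall(MethodVisitor mv) {
        mv.visitMethodInsn(Opcodes.INVOKESTATIC, "java/util/Comparator", "naturalOrder",
                "()Ljava/util/Comparator;", true);
    }
}
--------------------------------------------------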
@@ -66,6 +66,9 @@ public class FunctionRef {
     /** delegate method type method as type */
     public final Type delegateType;
 
+    /** whether a call is made on a delegate interface */
+    public final boolean isDelegateInterface;
+
     /**
      * Creates a new FunctionRef, which will resolve {@code type::call} from the whitelist.
      * @param definition the whitelist against which this script is being compiled
@@ -97,10 +100,13 @@ public class FunctionRef {
         // the Painless$Script class can be inferred if owner is null
         if (delegateMethod.owner == null) {
             delegateClassName = CLASS_NAME;
+            isDelegateInterface = false;
         } else if (delegateMethod.augmentation != null) {
             delegateClassName = delegateMethod.augmentation.getName();
+            isDelegateInterface = delegateMethod.augmentation.isInterface();
         } else {
             delegateClassName = delegateMethod.owner.clazz.getName();
+            isDelegateInterface = delegateMethod.owner.clazz.isInterface();
         }
 
         if ("<init>".equals(delegateMethod.name)) {
@@ -139,6 +145,7 @@ public class FunctionRef {
         delegateInvokeType = H_INVOKESTATIC;
         this.delegateMethodName = delegateMethodName;
         this.delegateMethodType = delegateMethodType.dropParameterTypes(0, numCaptures);
+        isDelegateInterface = false;
 
         this.interfaceMethod = null;
         delegateMethod = null;
@@ -188,6 +188,10 @@ public final class LambdaBootstrap {
      * @param delegateMethodName The name of the method to be called in the Painless script class
      * @param delegateMethodType The type of method call in the Painless script class without
      *                           the captured types
+     * @param isDelegateInterface Whether the method to be called is owned by an interface;
+     *                            the value is '1' if the delegate is an interface and '0'
+     *                            otherwise; note this is an int because the bootstrap method
+     *                            cannot convert constants to boolean
      * @return A {@link CallSite} linked to a factory method for creating a lambda class
      *         that implements the expected functional interface
      * @throws LambdaConversionException Thrown when an illegal type conversion occurs at link time
@@ -200,7 +204,8 @@ public final class LambdaBootstrap {
             String delegateClassName,
             int delegateInvokeType,
             String delegateMethodName,
-            MethodType delegateMethodType)
+            MethodType delegateMethodType,
+            int isDelegateInterface)
             throws LambdaConversionException {
         Loader loader = (Loader)lookup.lookupClass().getClassLoader();
         String lambdaClassName = Type.getInternalName(lookup.lookupClass()) + "$$Lambda" + loader.newLambdaIdentifier();
@@ -225,7 +230,7 @@ public final class LambdaBootstrap {
 
         generateInterfaceMethod(cw, factoryMethodType, lambdaClassType, interfaceMethodName,
             interfaceMethodType, delegateClassType, delegateInvokeType,
-            delegateMethodName, delegateMethodType, captures);
+            delegateMethodName, delegateMethodType, isDelegateInterface == 1, captures);
 
         endLambdaClass(cw);
@@ -369,6 +374,7 @@ public final class LambdaBootstrap {
             int delegateInvokeType,
             String delegateMethodName,
             MethodType delegateMethodType,
+            boolean isDelegateInterface,
             Capture[] captures)
             throws LambdaConversionException {
 
@@ -434,7 +440,7 @@ public final class LambdaBootstrap {
         Handle delegateHandle =
             new Handle(delegateInvokeType, delegateClassType.getInternalName(),
                 delegateMethodName, delegateMethodType.toMethodDescriptorString(),
-                delegateInvokeType == H_INVOKEINTERFACE);
+                isDelegateInterface);
         iface.invokeDynamic(delegateMethodName, Type.getMethodType(interfaceMethodType
             .toMethodDescriptorString()).getDescriptor(), DELEGATE_BOOTSTRAP_HANDLE,
             delegateHandle);
@@ -141,8 +141,8 @@ public final class WriterConstants {
 
     /** invokedynamic bootstrap for lambda expression/method references */
     public static final MethodType LAMBDA_BOOTSTRAP_TYPE =
-            MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class,
-                    MethodType.class, MethodType.class, String.class, int.class, String.class, MethodType.class);
+            MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class,
+                    MethodType.class, String.class, int.class, String.class, MethodType.class, int.class);
     public static final Handle LAMBDA_BOOTSTRAP_HANDLE =
             new Handle(Opcodes.H_INVOKESTATIC, Type.getInternalName(LambdaBootstrap.class),
                     "lambdaBootstrap", LAMBDA_BOOTSTRAP_TYPE.toMethodDescriptorString(), false);
@@ -121,7 +121,8 @@ public final class ECapturingFunctionRef extends AExpression implements ILambda
                 ref.delegateClassName,
                 ref.delegateInvokeType,
                 ref.delegateMethodName,
-                ref.delegateType
+                ref.delegateType,
+                ref.isDelegateInterface ? 1 : 0
             );
         }
     }
@@ -112,7 +112,8 @@ public final class EFunctionRef extends AExpression implements ILambda {
                 ref.delegateClassName,
                 ref.delegateInvokeType,
                 ref.delegateMethodName,
-                ref.delegateType
+                ref.delegateType,
+                ref.isDelegateInterface ? 1 : 0
             );
         } else {
             // TODO: don't do this: its just to cutover :)
@@ -222,7 +222,8 @@ public final class ELambda extends AExpression implements ILambda {
                 ref.delegateClassName,
                 ref.delegateInvokeType,
                 ref.delegateMethodName,
-                ref.delegateType
+                ref.delegateType,
+                ref.isDelegateInterface ? 1 : 0
             );
         } else {
             // placeholder
@@ -264,6 +264,11 @@ public class BasicExpressionTests extends ScriptTestCase {
 //        assertEquals(null, exec("def a = ['thing': 'bar']; a.other?.cat?.dog = 'wombat'; return a.other?.cat?.dog"));
     }
 
+    // test to ensure static interface methods are called correctly
+    public void testStaticInterfaceMethod() {
+        assertEquals(4, exec("def values = [1, 4, 3, 2]; values.sort(Comparator.comparing(p -> p)); return values[3]"));
+    }
+
     private void assertMustBeNullable(String script) {
         Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> exec(script));
         assertEquals("Result of null safe operator must be nullable", e.getMessage());
@@ -184,6 +184,11 @@ public class FunctionRefTests extends ScriptTestCase {
                 "def map = new HashMap(); f(map::getOrDefault)"));
     }
 
+    public void testInterfaceStaticMethod() {
+        assertEquals(-1, exec("Supplier get(Supplier supplier) { return supplier }" +
+                "Supplier s = get(Comparator::naturalOrder); s.get().compare(1, 2)"));
+    }
+
     public void testMethodMissing() {
         Exception e = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("List l = [2, 1]; l.sort(Integer::bogus); return l.get(0);");
@@ -23,3 +23,12 @@ esplugin {
     hasClientJar = true
 }
 
+integTestCluster {
+    // Modules whose integration is explicitly tested in integration tests
+    module project(':modules:lang-mustache')
+}
+
+run {
+    // Modules whose integration is explicitly tested in integration tests
+    module project(':modules:lang-mustache')
+}
@@ -36,6 +36,7 @@ import java.util.Objects;
 import java.util.Optional;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRatings;
 
@@ -129,26 +130,31 @@ public class DiscountedCumulativeGain implements EvaluationMetric {
                 .collect(Collectors.toList());
         List<RatedSearchHit> ratedHits = joinHitsWithRatings(hits, ratedDocs);
         List<Integer> ratingsInSearchHits = new ArrayList<>(ratedHits.size());
+        int unratedResults = 0;
         for (RatedSearchHit hit : ratedHits) {
-            // unknownDocRating might be null, which means it will be unrated docs are
-            // ignored in the dcg calculation
-            // we still need to add them as a placeholder so the rank of the subsequent
-            // ratings is correct
+            // unknownDocRating might be null, in which case unrated docs will be ignored in the dcg calculation.
+            // we still need to add them as a placeholder so the rank of the subsequent ratings is correct
             ratingsInSearchHits.add(hit.getRating().orElse(unknownDocRating));
+            if (hit.getRating().isPresent() == false) {
+                unratedResults++;
+            }
         }
-        double dcg = computeDCG(ratingsInSearchHits);
+        final double dcg = computeDCG(ratingsInSearchHits);
+        double result = dcg;
+        double idcg = 0;
+
         if (normalize) {
             Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder()));
-            double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size())));
-            if (idcg > 0) {
-                dcg = dcg / idcg;
+            idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size())));
+            if (idcg != 0) {
+                result = dcg / idcg;
             } else {
-                dcg = 0;
+                result = 0;
             }
         }
-        EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg);
+        EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, result);
         evalQueryQuality.addHitsAndRatings(ratedHits);
+        evalQueryQuality.setMetricDetails(new Detail(dcg, idcg, unratedResults));
         return evalQueryQuality;
     }
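For readers new to the metric, the quantities wired through this hunk follow the usual exponential-gain definition of discounted cumulative gain (an assumption here, since the body of `computeDCG` is outside the hunk):

["source","latex"]
--------------------------------------------------
\mathrm{DCG@}k = \sum_{i=1}^{k} \frac{2^{rel_i} - 1}{\log_2(i + 1)},
\qquad
\mathrm{nDCG@}k = \frac{\mathrm{DCG@}k}{\mathrm{IDCG@}k}
--------------------------------------------------

IDCG is the DCG of the same ratings rearranged into the ideal (descending) order, which is why the code sorts `allRatings` in reverse before computing `idcg`, and why `result` falls back to 0 when `idcg` is 0.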
@@ -167,7 +173,7 @@ public class DiscountedCumulativeGain implements EvaluationMetric {
     private static final ParseField K_FIELD = new ParseField("k");
     private static final ParseField NORMALIZE_FIELD = new ParseField("normalize");
     private static final ParseField UNKNOWN_DOC_RATING_FIELD = new ParseField("unknown_doc_rating");
-    private static final ConstructingObjectParser<DiscountedCumulativeGain, Void> PARSER = new ConstructingObjectParser<>("dcg_at", false,
+    private static final ConstructingObjectParser<DiscountedCumulativeGain, Void> PARSER = new ConstructingObjectParser<>("dcg", false,
             args -> {
                 Boolean normalized = (Boolean) args[0];
                 Integer optK = (Integer) args[2];
@@ -217,4 +223,118 @@ public class DiscountedCumulativeGain implements EvaluationMetric {
     public final int hashCode() {
         return Objects.hash(normalize, unknownDocRating, k);
     }
+
+    public static final class Detail implements MetricDetail {
+
+        private static ParseField DCG_FIELD = new ParseField("dcg");
+        private static ParseField IDCG_FIELD = new ParseField("ideal_dcg");
+        private static ParseField NDCG_FIELD = new ParseField("normalized_dcg");
+        private static ParseField UNRATED_FIELD = new ParseField("unrated_docs");
+        private final double dcg;
+        private final double idcg;
+        private final int unratedDocs;
+
+        Detail(double dcg, double idcg, int unratedDocs) {
+            this.dcg = dcg;
+            this.idcg = idcg;
+            this.unratedDocs = unratedDocs;
+        }
+
+        Detail(StreamInput in) throws IOException {
+            this.dcg = in.readDouble();
+            this.idcg = in.readDouble();
+            this.unratedDocs = in.readVInt();
+        }
+
+        @Override
+        public String getMetricName() {
+            return NAME;
+        }
+
+        @Override
+        public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.field(DCG_FIELD.getPreferredName(), this.dcg);
+            if (this.idcg != 0) {
+                builder.field(IDCG_FIELD.getPreferredName(), this.idcg);
+                builder.field(NDCG_FIELD.getPreferredName(), this.dcg / this.idcg);
+            }
+            builder.field(UNRATED_FIELD.getPreferredName(), this.unratedDocs);
+            return builder;
+        }
+
+        private static final ConstructingObjectParser<Detail, Void> PARSER = new ConstructingObjectParser<>(NAME, true, args -> {
+            return new Detail((Double) args[0], (Double) args[1] != null ? (Double) args[1] : 0.0d, (Integer) args[2]);
+        });
+
+        static {
+            PARSER.declareDouble(constructorArg(), DCG_FIELD);
+            PARSER.declareDouble(optionalConstructorArg(), IDCG_FIELD);
+            PARSER.declareInt(constructorArg(), UNRATED_FIELD);
+        }
+
+        public static Detail fromXContent(XContentParser parser) {
+            return PARSER.apply(parser, null);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeDouble(this.dcg);
+            out.writeDouble(this.idcg);
+            out.writeVInt(this.unratedDocs);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return NAME;
+        }
+
+        /**
+         * @return the discounted cumulative gain
+         */
+        public double getDCG() {
+            return this.dcg;
+        }
+
+        /**
+         * @return the ideal discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required
+         */
+        public double getIDCG() {
+            return this.idcg;
+        }
+
+        /**
+         * @return the normalized discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required
+         */
+        public double getNDCG() {
+            return (this.idcg != 0) ? this.dcg / this.idcg : 0;
+        }
+
+        /**
+         * @return the number of unrated documents in the search results
+         */
+        public Object getUnratedDocs() {
+            return this.unratedDocs;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) {
+                return true;
+            }
+            if (obj == null || getClass() != obj.getClass()) {
+                return false;
+            }
+            DiscountedCumulativeGain.Detail other = (DiscountedCumulativeGain.Detail) obj;
+            return (this.dcg == other.dcg &&
+                    this.idcg == other.idcg &&
+                    this.unratedDocs == other.unratedDocs);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(this.dcg, this.idcg, this.unratedDocs);
+        }
+    }
 }
@@ -41,6 +41,8 @@ public class RankEvalNamedXContentProvider implements NamedXContentProvider {
                 PrecisionAtK.Detail::fromXContent));
         namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(MeanReciprocalRank.NAME),
                 MeanReciprocalRank.Detail::fromXContent));
+        namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(DiscountedCumulativeGain.NAME),
+                DiscountedCumulativeGain.Detail::fromXContent));
         return namedXContent;
     }
 }
@@ -61,8 +61,9 @@ public class RankEvalPlugin extends Plugin implements ActionPlugin {
         namedWriteables.add(
                 new NamedWriteableRegistry.Entry(EvaluationMetric.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain::new));
         namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Detail::new));
-        namedWriteables
-                .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new));
+        namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new));
+        namedWriteables.add(
+                new NamedWriteableRegistry.Entry(MetricDetail.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain.Detail::new));
         return namedWriteables;
     }
 
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.index.rankeval;
 
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.text.Text;
@@ -254,9 +255,8 @@ public class DiscountedCumulativeGainTests extends ESTestCase {
 
     public static DiscountedCumulativeGain createTestItem() {
         boolean normalize = randomBoolean();
-        Integer unknownDocRating = Integer.valueOf(randomIntBetween(0, 1000));
-
-        return new DiscountedCumulativeGain(normalize, unknownDocRating, 10);
+        Integer unknownDocRating = frequently() ? Integer.valueOf(randomIntBetween(0, 1000)) : null;
+        return new DiscountedCumulativeGain(normalize, unknownDocRating, randomIntBetween(1, 10));
     }
 
     public void testXContentRoundtrip() throws IOException {
@@ -283,7 +283,25 @@ public class DiscountedCumulativeGainTests extends ESTestCase {
             parser.nextToken();
             XContentParseException exception = expectThrows(XContentParseException.class,
                     () -> DiscountedCumulativeGain.fromXContent(parser));
-            assertThat(exception.getMessage(), containsString("[dcg_at] unknown field"));
+            assertThat(exception.getMessage(), containsString("[dcg] unknown field"));
         }
     }
 
+    public void testMetricDetails() {
+        double dcg = randomDoubleBetween(0, 1, true);
+        double idcg = randomBoolean() ? 0.0 : randomDoubleBetween(0, 1, true);
+        double expectedNdcg = idcg != 0 ? dcg / idcg : 0.0;
+        int unratedDocs = randomIntBetween(0, 100);
+        DiscountedCumulativeGain.Detail detail = new DiscountedCumulativeGain.Detail(dcg, idcg, unratedDocs);
+        assertEquals(dcg, detail.getDCG(), 0.0);
+        assertEquals(idcg, detail.getIDCG(), 0.0);
+        assertEquals(expectedNdcg, detail.getNDCG(), 0.0);
+        assertEquals(unratedDocs, detail.getUnratedDocs());
+        if (idcg != 0) {
+            assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"ideal_dcg\":" + idcg + ",\"normalized_dcg\":" + expectedNdcg
+                    + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail));
+        } else {
+            assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail));
+        }
+    }
+
@@ -68,10 +68,20 @@ public class EvalQueryQualityTests extends ESTestCase {
         EvalQueryQuality evalQueryQuality = new EvalQueryQuality(randomAlphaOfLength(10),
                 randomDoubleBetween(0.0, 1.0, true));
         if (randomBoolean()) {
-            if (randomBoolean()) {
+            int metricDetail = randomIntBetween(0, 2);
+            switch (metricDetail) {
+            case 0:
                 evalQueryQuality.setMetricDetails(new PrecisionAtK.Detail(randomIntBetween(0, 1000), randomIntBetween(0, 1000)));
-            } else {
+                break;
+            case 1:
                 evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Detail(randomIntBetween(0, 1000)));
+                break;
+            case 2:
+                evalQueryQuality.setMetricDetails(new DiscountedCumulativeGain.Detail(randomDoubleBetween(0, 1, true),
+                        randomBoolean() ? randomDoubleBetween(0, 1, true) : 0, randomInt()));
+                break;
+            default:
+                throw new IllegalArgumentException("illegal randomized value in test");
             }
         }
         evalQueryQuality.addHitsAndRatings(ratedHits);
@@ -27,17 +27,13 @@ import org.elasticsearch.http.HttpResponse;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.transport.netty4.Netty4Utils;
 
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.Map;
-
 public class Netty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse, HttpPipelinedMessage {
 
     private final int sequence;
     private final Netty4HttpRequest request;
 
     Netty4HttpResponse(Netty4HttpRequest request, RestStatus status, BytesReference content) {
-        super(request.nettyRequest().protocolVersion(), getStatus(status), Netty4Utils.toByteBuf(content));
+        super(request.nettyRequest().protocolVersion(), HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content));
         this.sequence = request.sequence();
         this.request = request;
     }
@@ -60,62 +56,5 @@ public class Netty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse, HttpPipelinedMessage {
     public Netty4HttpRequest getRequest() {
         return request;
     }
-
-    private static Map<RestStatus, HttpResponseStatus> MAP;
-
-    static {
-        EnumMap<RestStatus, HttpResponseStatus> map = new EnumMap<>(RestStatus.class);
-        map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE);
-        map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS);
-        map.put(RestStatus.OK, HttpResponseStatus.OK);
-        map.put(RestStatus.CREATED, HttpResponseStatus.CREATED);
-        map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED);
-        map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION);
-        map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT);
-        map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT);
-        map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT);
-        map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this??
-        map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES);
-        map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY);
-        map.put(RestStatus.FOUND, HttpResponseStatus.FOUND);
-        map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER);
-        map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED);
-        map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY);
-        map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT);
-        map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST);
-        map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED);
-        map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED);
-        map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN);
-        map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND);
-        map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED);
-        map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE);
-        map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED);
-        map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT);
-        map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT);
-        map.put(RestStatus.GONE, HttpResponseStatus.GONE);
-        map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED);
-        map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED);
-        map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);
-        map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG);
-        map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE);
-        map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE);
-        map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED);
-        map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST);
-        map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST);
-        map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST);
-        map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS);
-        map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR);
-        map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED);
-        map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY);
-        map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE);
-        map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT);
-        map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED);
-        MAP = Collections.unmodifiableMap(map);
-    }
-
-    private static HttpResponseStatus getStatus(RestStatus status) {
-        return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR);
-    }
-
 }
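The hand-maintained EnumMap, including its lossy MULTI_STATUS to 500 mapping, is replaced by Netty's own lookup. A small sketch of why that is safe, assuming Netty 4's `HttpResponseStatus.valueOf(int)`, which returns the cached constant for codes it knows and synthesizes a status object for codes it does not, rather than throwing:

["source","java"]
--------------------------------------------------
import io.netty.handler.codec.http.HttpResponseStatus;

public class StatusLookupSketch {
    public static void main(String[] args) {
        // Known codes resolve to Netty's shared constants.
        System.out.println(HttpResponseStatus.valueOf(200)); // 200 OK
        // 207 was downgraded to 500 by the old map ("no status for this??");
        // valueOf preserves the real code instead.
        System.out.println(HttpResponseStatus.valueOf(207)); // 207 Multi-Status
    }
}
--------------------------------------------------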
@@ -1 +0,0 @@
-82d83fcac1d9c8948aa0247fc9c87f177ddbd59b
@@ -0,0 +1 @@
+b91a260d8d12ee4b3302a63059c73a34de0ce146
@@ -1 +0,0 @@
-73fd4364f2931e7c8303b5927e140a7d21116c36
@@ -0,0 +1 @@
+cc1ca9bd9e2c162dd1da8c2e7111913fd8033e48
@@ -1 +0,0 @@
-0a2c4417fa9a8be078864f590a5a66b98d551cf5
@@ -0,0 +1 @@
+2fa3662a10a9e085b1c7b87293d727422cbe6224
@@ -1 +0,0 @@
-6fa179924f139a30fc0e5399256e1a44562ed32b
@@ -0,0 +1 @@
+60aa50c11857e6739e68936cb45102562b2c46b4
@@ -1 +0,0 @@
-5ed135d34d7868b71a725257a46dc8d8735a15d4
@@ -0,0 +1 @@
+4586368007785a3be26db4b9ce404ffb8c76f350
@@ -1 +0,0 @@
-875911b36b99c2103719f94559878a0ecb862fb6
@@ -0,0 +1 @@
+9c6d030ab2c148df7a6ba73a774ef4b8c720a6cb
@@ -1 +0,0 @@
-e7191628df8cb72382a20da79224aef677117849
@@ -0,0 +1 @@
+8275bf8df2644d5fcec2963cf237d14b6e00fefe
@@ -53,6 +53,11 @@ test {
     systemProperty 'tests.artifact', project.name
 }
 
+check {
+    // also execute the QA tests when testing the plugin
+    dependsOn 'qa:amazon-ec2:check'
+}
+
 thirdPartyAudit.excludes = [
     // classes are missing
     'com.amazonaws.jmespath.JmesPathEvaluationVisitor',
@@ -0,0 +1,72 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */


import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.AntFixture

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

dependencies {
    testCompile project(path: ':plugins:discovery-ec2', configuration: 'runtime')
}

final int ec2NumberOfNodes = 3
File ec2DiscoveryFile = new File(project.buildDir, 'generated-resources/nodes.uri')

/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/
task ec2Fixture(type: AntFixture) {
    dependsOn compileTestJava
    env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
    executable = new File(project.runtimeJavaHome, 'bin/java')
    args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, ec2DiscoveryFile.absolutePath
}

Map<String, Object> expansions = [
    'expected_nodes': ec2NumberOfNodes
]

processTestResources {
    inputs.properties(expansions)
    MavenFilteringHack.filter(it, expansions)
}

integTestCluster {
    dependsOn ec2Fixture
    numNodes = ec2NumberOfNodes
    plugin ':plugins:discovery-ec2'
    keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key'
    keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key'
    setting 'discovery.zen.hosts_provider', 'ec2'
    setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}"
    unicastTransportUri = { seedNode, node, ant -> return null }

    waitCondition = { node, ant ->
        ec2DiscoveryFile.parentFile.mkdirs()
        ec2DiscoveryFile.setText(integTest.nodes.collect { n -> "${n.transportUri()}" }.join('\n'), 'UTF-8')

        File tmpFile = new File(node.cwd, 'wait.success')
        ant.get(src: "http://${node.httpUri()}/",
                dest: tmpFile.toString(),
                ignoreerrors: true,
                retries: 10)
        return tmpFile.exists()
    }
}
@@ -17,17 +17,16 @@
  * under the License.
  */
 
-package org.elasticsearch.index.rankeval;
+package org.elasticsearch.discovery.ec2;
 
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
-public class SmokeTestRankEvalWithMustacheYAMLTestSuiteIT extends ESClientYamlSuiteTestCase {
+public class AmazonEC2DiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
-    public SmokeTestRankEvalWithMustacheYAMLTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+    public AmazonEC2DiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
         super(testCandidate);
     }
 
@@ -35,5 +34,4 @@
     public static Iterable<Object[]> parameters() throws Exception {
         return ESClientYamlSuiteTestCase.createParameters();
     }
-
 }
@@ -0,0 +1,194 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.discovery.ec2;

import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.fixture.AbstractHttpFixture;

import javax.xml.XMLConstants;
import javax.xml.stream.XMLOutputFactory;
import javax.xml.stream.XMLStreamWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Objects;
import java.util.UUID;

import static java.nio.charset.StandardCharsets.UTF_8;

/**
 * {@link AmazonEC2Fixture} is a fixture that emulates an AWS EC2 service.
 */
public class AmazonEC2Fixture extends AbstractHttpFixture {

    private final Path nodes;

    private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) {
        super(workingDir);
        this.nodes = toPath(Objects.requireNonNull(nodesUriPath));
    }

    public static void main(String[] args) throws Exception {
        if (args == null || args.length != 2) {
            throw new IllegalArgumentException("AmazonEC2Fixture <working directory> <nodes transport uri file>");
        }

        final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]);
        fixture.listen();
    }

    @Override
    protected Response handle(final Request request) throws IOException {
        if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) {
            final String userAgent = request.getHeader("User-Agent");
            if (userAgent != null && userAgent.startsWith("aws-sdk-java")) {
                // Simulate an EC2 DescribeInstancesResponse
                byte[] responseBody = EMPTY_BYTE;
                for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) {
                    if ("Action".equals(parse.getName())) {
                        responseBody = generateDescribeInstancesResponse();
                        break;
                    }
                }
                return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody);
            }
        }
        return null;
    }

    /**
     * Generates a XML response that describe the EC2 instances
     */
    private byte[] generateDescribeInstancesResponse() {
        final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory();
        xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);

        final StringWriter out = new StringWriter();
        XMLStreamWriter sw;
        try {
            sw = xmlOutputFactory.createXMLStreamWriter(out);
            sw.writeStartDocument();

            String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/";
            sw.setDefaultNamespace(namespace);
            sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace);
            {
                sw.writeStartElement("requestId");
                sw.writeCharacters(UUID.randomUUID().toString());
                sw.writeEndElement();

                sw.writeStartElement("reservationSet");
                {
                    if (Files.exists(nodes)) {
                        for (String address : Files.readAllLines(nodes)) {

                            sw.writeStartElement("item");
                            {
                                sw.writeStartElement("reservationId");
                                sw.writeCharacters(UUID.randomUUID().toString());
                                sw.writeEndElement();

                                sw.writeStartElement("instancesSet");
                                {
                                    sw.writeStartElement("item");
                                    {
                                        sw.writeStartElement("instanceId");
                                        sw.writeCharacters(UUID.randomUUID().toString());
                                        sw.writeEndElement();

                                        sw.writeStartElement("imageId");
                                        sw.writeCharacters(UUID.randomUUID().toString());
                                        sw.writeEndElement();

                                        sw.writeStartElement("instanceState");
                                        {
                                            sw.writeStartElement("code");
                                            sw.writeCharacters("16");
                                            sw.writeEndElement();

                                            sw.writeStartElement("name");
                                            sw.writeCharacters("running");
                                            sw.writeEndElement();
                                        }
                                        sw.writeEndElement();

                                        sw.writeStartElement("privateDnsName");
                                        sw.writeCharacters(address);
                                        sw.writeEndElement();

                                        sw.writeStartElement("dnsName");
                                        sw.writeCharacters(address);
                                        sw.writeEndElement();

                                        sw.writeStartElement("instanceType");
                                        sw.writeCharacters("m1.medium");
                                        sw.writeEndElement();

                                        sw.writeStartElement("placement");
                                        {
                                            sw.writeStartElement("availabilityZone");
                                            sw.writeCharacters("use-east-1e");
                                            sw.writeEndElement();

                                            sw.writeEmptyElement("groupName");

                                            sw.writeStartElement("tenancy");
                                            sw.writeCharacters("default");
                                            sw.writeEndElement();
                                        }
                                        sw.writeEndElement();

                                        sw.writeStartElement("privateIpAddress");
                                        sw.writeCharacters(address);
                                        sw.writeEndElement();

                                        sw.writeStartElement("ipAddress");
                                        sw.writeCharacters(address);
                                        sw.writeEndElement();
                                    }
                                    sw.writeEndElement();
                                }
                                sw.writeEndElement();
                            }
                            sw.writeEndElement();
                        }
                    }
                }
                sw.writeEndElement();
            }
            sw.writeEndElement();

            sw.writeEndDocument();
            sw.flush();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return out.toString().getBytes(UTF_8);
    }

    @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
    private static Path toPath(final String dir) {
        return Paths.get(dir);
    }
}
@@ -0,0 +1,15 @@
# Integration tests for discovery-ec2
setup:
  - do:
      cluster.health:
        wait_for_status: green
        wait_for_nodes: ${expected_nodes}

---
"All nodes are correctly discovered":

  - do:
      nodes.info:
        metric: [ transport ]

  - match: { _nodes.total: ${expected_nodes} }
@ -1,252 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.ec2;
|
||||
|
||||
import com.amazonaws.util.IOUtils;
|
||||
import com.sun.net.httpserver.Headers;
|
||||
import com.sun.net.httpserver.HttpServer;
|
||||
import org.apache.http.NameValuePair;
|
||||
import org.apache.http.client.utils.URLEncodedUtils;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.MockSecureSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.mocksocket.MockHttpServer;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import javax.xml.XMLConstants;
|
||||
import javax.xml.stream.XMLOutputFactory;
|
||||
import javax.xml.stream.XMLStreamException;
|
||||
import javax.xml.stream.XMLStreamWriter;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.StringWriter;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0)
|
||||
@SuppressForbidden(reason = "use http server")
|
||||
// TODO this should be a IT but currently all ITs in this project run against a real cluster
|
||||
public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase {
|
||||
|
||||
private static HttpServer httpServer;
|
||||
private static Path logDir;
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(Ec2DiscoveryPlugin.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
Path resolve = logDir.resolve(Integer.toString(nodeOrdinal));
|
||||
try {
|
||||
Files.createDirectory(resolve);
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
MockSecureSettings secureSettings = new MockSecureSettings();
|
||||
secureSettings.setString(AwsEc2Service.ACCESS_KEY_SETTING.getKey(), "some_access");
|
||||
secureSettings.setString(AwsEc2Service.SECRET_KEY_SETTING.getKey(), "some_secret");
|
||||
        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
            .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "ec2")
            .put("path.logs", resolve)
            .put("transport.tcp.port", 0)
            .put("node.portsfile", "true")
            .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" +
                httpServer.getAddress().getPort())
            .setSecureSettings(secureSettings)
            .build();
    }

    /**
     * Creates a mock EC2 endpoint that provides the list of started nodes to the DescribeInstances API call
     */
    @BeforeClass
    public static void startHttpd() throws Exception {
        logDir = createTempDir();
        httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0);

        httpServer.createContext("/", (s) -> {
            Headers headers = s.getResponseHeaders();
            headers.add("Content-Type", "text/xml; charset=UTF-8");
            String action = null;
            for (NameValuePair parse : URLEncodedUtils.parse(IOUtils.toString(s.getRequestBody()), StandardCharsets.UTF_8)) {
                if ("Action".equals(parse.getName())) {
                    action = parse.getValue();
                    break;
                }
            }
            assertThat(action, equalTo("DescribeInstances"));

            XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory();
            xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
            StringWriter out = new StringWriter();
            XMLStreamWriter sw;
            try {
                sw = xmlOutputFactory.createXMLStreamWriter(out);
                sw.writeStartDocument();

                String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/";
                sw.setDefaultNamespace(namespace);
                sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace);
                {
                    sw.writeStartElement("requestId");
                    sw.writeCharacters(UUID.randomUUID().toString());
                    sw.writeEndElement();

                    sw.writeStartElement("reservationSet");
                    {
                        Path[] files = FileSystemUtils.files(logDir);
                        for (int i = 0; i < files.length; i++) {
                            Path resolve = files[i].resolve("transport.ports");
                            if (Files.exists(resolve)) {
                                List<String> addresses = Files.readAllLines(resolve);
                                Collections.shuffle(addresses, random());

                                sw.writeStartElement("item");
                                {
                                    sw.writeStartElement("reservationId");
                                    sw.writeCharacters(UUID.randomUUID().toString());
                                    sw.writeEndElement();

                                    sw.writeStartElement("instancesSet");
                                    {
                                        sw.writeStartElement("item");
                                        {
                                            sw.writeStartElement("instanceId");
                                            sw.writeCharacters(UUID.randomUUID().toString());
                                            sw.writeEndElement();

                                            sw.writeStartElement("imageId");
                                            sw.writeCharacters(UUID.randomUUID().toString());
                                            sw.writeEndElement();

                                            sw.writeStartElement("instanceState");
                                            {
                                                sw.writeStartElement("code");
                                                sw.writeCharacters("16");
                                                sw.writeEndElement();

                                                sw.writeStartElement("name");
                                                sw.writeCharacters("running");
                                                sw.writeEndElement();
                                            }
                                            sw.writeEndElement();

                                            sw.writeStartElement("privateDnsName");
                                            sw.writeCharacters(addresses.get(0));
                                            sw.writeEndElement();

                                            sw.writeStartElement("dnsName");
                                            sw.writeCharacters(addresses.get(0));
                                            sw.writeEndElement();

                                            sw.writeStartElement("instanceType");
                                            sw.writeCharacters("m1.medium");
                                            sw.writeEndElement();

                                            sw.writeStartElement("placement");
                                            {
                                                sw.writeStartElement("availabilityZone");
                                                sw.writeCharacters("us-east-1e");
                                                sw.writeEndElement();

                                                sw.writeEmptyElement("groupName");

                                                sw.writeStartElement("tenancy");
                                                sw.writeCharacters("default");
                                                sw.writeEndElement();
                                            }
                                            sw.writeEndElement();

                                            sw.writeStartElement("privateIpAddress");
                                            sw.writeCharacters(addresses.get(0));
                                            sw.writeEndElement();

                                            sw.writeStartElement("ipAddress");
                                            sw.writeCharacters(addresses.get(0));
                                            sw.writeEndElement();
                                        }
                                        sw.writeEndElement();
                                    }
                                    sw.writeEndElement();
                                }
                                sw.writeEndElement();
                            }
                        }
                    }
                    sw.writeEndElement();
                }
                sw.writeEndElement();

                sw.writeEndDocument();
                sw.flush();

                final byte[] responseAsBytes = out.toString().getBytes(StandardCharsets.UTF_8);
                s.sendResponseHeaders(200, responseAsBytes.length);
                OutputStream responseBody = s.getResponseBody();
                responseBody.write(responseAsBytes);
                responseBody.close();
            } catch (XMLStreamException e) {
                Loggers.getLogger(Ec2DiscoveryClusterFormationTests.class).error("Failed serializing XML", e);
                throw new RuntimeException(e);
            }
        });

        httpServer.start();
    }

    @AfterClass
    public static void stopHttpd() throws IOException {
        for (int i = 0; i < internalCluster().size(); i++) {
            // shut them all down, otherwise we get spammed with connection refused exceptions
            internalCluster().stopRandomDataNode();
        }
        httpServer.stop(0);
        httpServer = null;
        logDir = null;
    }

    public void testJoin() throws ExecutionException, InterruptedException {
        // only wait for the cluster to form
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
        // add one more node and wait for it to join
        internalCluster().startDataOnlyNode();
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
    }
}
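
The mock handler registered above answers on any path and only checks that the form-encoded body carries `Action=DescribeInstances`, the call the EC2 hosts provider issues during discovery. As a rough manual probe (not part of the test; the dynamically assigned port must be substituted for the `PORT` placeholder):

    curl -s -d 'Action=DescribeInstances' 'http://127.0.0.1:PORT/'

The reply is the generated `DescribeInstancesResponse` XML, containing one running instance per `transport.ports` file found under the test log directory.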
@@ -26,17 +26,13 @@ import org.elasticsearch.http.HttpPipelinedMessage;
import org.elasticsearch.http.HttpResponse;
import org.elasticsearch.rest.RestStatus;

import java.util.Collections;
import java.util.EnumMap;
import java.util.Map;

public class NioHttpResponse extends DefaultFullHttpResponse implements HttpResponse, HttpPipelinedMessage {

    private final int sequence;
    private final NioHttpRequest request;

    NioHttpResponse(NioHttpRequest request, RestStatus status, BytesReference content) {
        super(request.nettyRequest().protocolVersion(), getStatus(status), ByteBufUtils.toByteBuf(content));
        super(request.nettyRequest().protocolVersion(), HttpResponseStatus.valueOf(status.getStatus()), ByteBufUtils.toByteBuf(content));
        this.sequence = request.sequence();
        this.request = request;
    }

@@ -56,63 +52,7 @@ public class NioHttpResponse extends DefaultFullHttpResponse implements HttpResp
        return sequence;
    }

    private static Map<RestStatus, HttpResponseStatus> MAP;

    public NioHttpRequest getRequest() {
        return request;
    }

    static {
        EnumMap<RestStatus, HttpResponseStatus> map = new EnumMap<>(RestStatus.class);
        map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE);
        map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS);
        map.put(RestStatus.OK, HttpResponseStatus.OK);
        map.put(RestStatus.CREATED, HttpResponseStatus.CREATED);
        map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED);
        map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION);
        map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT);
        map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT);
        map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT);
        map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this??
        map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES);
        map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY);
        map.put(RestStatus.FOUND, HttpResponseStatus.FOUND);
        map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER);
        map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED);
        map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY);
        map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT);
        map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST);
        map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED);
        map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED);
        map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN);
        map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND);
        map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED);
        map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE);
        map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED);
        map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT);
        map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT);
        map.put(RestStatus.GONE, HttpResponseStatus.GONE);
        map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED);
        map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED);
        map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);
        map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG);
        map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE);
        map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE);
        map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED);
        map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST);
        map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST);
        map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST);
        map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS);
        map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR);
        map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED);
        map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY);
        map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE);
        map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT);
        map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED);
        MAP = Collections.unmodifiableMap(map);
    }

    private static HttpResponseStatus getStatus(RestStatus status) {
        return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR);
    }
}
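
The replacement constructor line relies on Netty's `HttpResponseStatus.valueOf(int)`, which makes the hand-maintained `EnumMap` above, and its lossy fallbacks such as 207 becoming 500, unnecessary. A minimal sketch of the assumed behavior, separate from the diff:

import io.netty.handler.codec.http.HttpResponseStatus;

public class StatusValueOfSketch {
    public static void main(String[] args) {
        // Known codes resolve to Netty's cached constants.
        System.out.println(HttpResponseStatus.valueOf(200)); // prints "200 OK"
        // 207 resolves to a real Multi-Status constant in recent Netty
        // versions, where the old map coerced it to 500.
        System.out.println(HttpResponseStatus.valueOf(207));
        // Codes Netty does not know still yield a usable synthetic status.
        System.out.println(HttpResponseStatus.valueOf(599));
    }
}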
@@ -163,3 +163,31 @@ setup() {
    assert_file_exist /var/log/elasticsearch/gc.log.0.current
    stop_elasticsearch_service
}

# Ensures that if the value set on the OS is less than $MAX_MAP_COUNT,
# it will be updated
@test "[INIT.D] sysctl is run when the value set is too small" {
    # intentionally a ridiculously low number
    sysctl -q -w vm.max_map_count=100
    start_elasticsearch_service
    max_map_count=$(sysctl -n vm.max_map_count)
    stop_elasticsearch_service

    [ $max_map_count = 262144 ]
}

# Ensures that if $MAX_MAP_COUNT is not greater than the value set on the OS,
# we do not attempt to update it; this covers equality as well, since we can
# trust that comparison operators work as intended.
@test "[INIT.D] sysctl is not run when it already has a larger or equal value set" {
    # intentionally set to the default +1
    sysctl -q -w vm.max_map_count=262145
    start_elasticsearch_service
    max_map_count=$(sysctl -n vm.max_map_count)
    stop_elasticsearch_service

    # default value +1
    [ $max_map_count = 262145 ]
}
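
Both tests assume the init script raises `vm.max_map_count` only when the kernel's current value is below `$MAX_MAP_COUNT`. A rough sketch of that guard, with variable names assumed rather than copied from the actual script:

if [ -n "$MAX_MAP_COUNT" ]; then
    current=$(sysctl -n vm.max_map_count)
    if [ "$current" -lt "$MAX_MAP_COUNT" ]; then
        sysctl -q -w vm.max_map_count="$MAX_MAP_COUNT"
    fi
fi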
@@ -198,9 +198,7 @@ header. The warnings must match exactly. Using it looks like this:
....

If the arguments to `do` include `node_selector` then the request is only
sent to nodes that match the `node_selector`. Currently only the `version`
selector is supported and it has the same logic as the `version` field in
`skip`. It looks like this:
sent to nodes that match the `node_selector`. It looks like this:

....
"test id":

@@ -216,6 +214,19 @@ selector is supported and it has the same logic as the `version` field in
      body: { foo: bar }
....

If you list multiple selectors then the request will only go to nodes that
match all of those selectors. The following selectors are supported:

* `version`: Only nodes whose version is within the range will receive the
request. The syntax for the range is the same as for the `version` field in
`skip`.
* `attribute`: Only nodes that have an attribute with the given name and
value receive the request. It looks like this:

....
node_selector:
    attribute:
        name: value
....
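
Both kinds of selectors can be combined, in which case the request goes only
to nodes that satisfy the version range and carry the attribute. A sketch with
placeholder values:

....
node_selector:
    version: "6.0.0 - "
    attribute:
        name: value
....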

=== `set`

For some tests, it is necessary to extract a value from the previous `response`, in
@@ -1 +0,0 @@
8cd761f40c4a89ed977167f0518d12e409eaf3d8
@@ -0,0 +1 @@
557d62d2b13d3dcb1810a1633e22625e42425425
@@ -1 +0,0 @@
8c93ed67599d345b9359586248ab92342d7d3033
@@ -0,0 +1 @@
d3755ad4c98b49fe5055b32358e3071727177c03
@@ -1 +0,0 @@
003ed080e5184661e606091cd321c229798b22f8
@@ -0,0 +1 @@
c1bbf611535f0b0fd0ba14e8da67c8d645b95244
@@ -1 +0,0 @@
0b4be9f96edfd3dbcff5aa9b3f0914e86eb9cc51
@@ -0,0 +1 @@
b62ebd53bbefb2f59cd246157a6768cae8a5a3a1
@@ -1 +0,0 @@
a5dcceb5bc017cee6ab5d3ee1943aca1ac6fe074
@@ -0,0 +1 @@
cba0fd4ccb98db8a72287a95d6b653e455f9eeb3
@@ -1 +0,0 @@
b59e7441f121da969bef8eef2c0c61743b4230a8
@@ -0,0 +1 @@
5127ed0b7516f8b28d84e837df4f33c67e361f6c
@@ -1 +0,0 @@
46736dbb07b432f0a7c1b3080f62932c483e5cb9
@@ -0,0 +1 @@
45c7b13aae1104f9f5f0fca0606e5741309c8d74
@@ -1 +0,0 @@
ee203718d525da0c6258a51a5a32d877089fe5af
@@ -0,0 +1 @@
2540c4b5d9dca8a39a3b4d58efe4ab484df7254f
@@ -1 +0,0 @@
cf17a332d8e42a45e8f013d5df408f4391d2620a
@@ -0,0 +1 @@
e9d0c0c020917d4bf9b590526866ff5547dbaa17
@@ -1 +0,0 @@
04832303d70502d2ece44501cb1716f42e24fe35
@@ -0,0 +1 @@
50969cdb7279047fbec94dda6e7d74d1c73c07f8
@@ -1 +0,0 @@
639313e3a9573779b6a28b45a7f57fc1f73ffa46
@@ -0,0 +1 @@
94524b293572b1f0d01a0faeeade1ff24713f966
@@ -1 +0,0 @@
6144b493ba3588a638858d0058054758acc619b9
@@ -0,0 +1 @@
878db723e41ece636ed338c4ef374e900f221a14
@@ -1 +0,0 @@
9d00c6b8bbbbb496aecd555406267fee9e0af914
@@ -0,0 +1 @@
c8dc85c32aeac6ff320aa6a9ea57881ad4847a55
@@ -1 +0,0 @@
159cdb6d36845690cb1972d02cc0b472bb14b7f3
@@ -0,0 +1 @@
203d8d22ab172e624784a5fdeaecdd01ae25fb3d
@@ -1 +0,0 @@
af1dd0218d58990cca5c1592d9722e67d233c996
@@ -0,0 +1 @@
4d6cf8fa1064a86991d5cd12a2ed32119ac91212
@@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;

@@ -77,8 +76,9 @@ public class AllocatedPersistentTask extends CancellableTask {
     * <p>
     * This doesn't affect the status of this allocated task.
     */
    public void updatePersistentStatus(Task.Status status, ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>> listener) {
        persistentTasksService.updateStatus(persistentTaskId, allocationId, status, listener);
    public void updatePersistentTaskState(final PersistentTaskState state,
                                          final ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>> listener) {
        persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, listener);
    }

    public String getPersistentTaskId() {

@@ -116,7 +116,7 @@ public class AllocatedPersistentTask extends CancellableTask {
    }

    protected final boolean isCompleted() {
        return state.get() == State.COMPLETED;
    }

    boolean markAsCancelled() {

@@ -20,7 +20,6 @@ package org.elasticsearch.persistent;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;

/**

@@ -29,16 +28,17 @@ import org.elasticsearch.threadpool.ThreadPool;
 * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService
 */
public class NodePersistentTasksExecutor {

    private final ThreadPool threadPool;

    public NodePersistentTasksExecutor(ThreadPool threadPool) {
    NodePersistentTasksExecutor(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    public <Params extends PersistentTaskParams> void executeTask(Params params,
                                                                  @Nullable Task.Status status,
                                                                  AllocatedPersistentTask task,
                                                                  PersistentTasksExecutor<Params> executor) {
    public <Params extends PersistentTaskParams> void executeTask(final Params params,
                                                                  final @Nullable PersistentTaskState state,
                                                                  final AllocatedPersistentTask task,
                                                                  final PersistentTasksExecutor<Params> executor) {
        threadPool.executor(executor.getExecutor()).execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {

@@ -49,14 +49,12 @@ public class NodePersistentTasksExecutor {
            @Override
            protected void doRun() throws Exception {
                try {
                    executor.nodeOperation(task, params, status);
                    executor.nodeOperation(task, params, state);
                } catch (Exception ex) {
                    task.markAsFailed(ex);
                }

            }
        });

    }

}
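
The hunks above replace `Task.Status` with `PersistentTaskState` throughout the node-side execution path. A minimal sketch of an executor written against the new `nodeOperation` signature; the task name, executor name, and resume logic here are hypothetical, not taken from the diff:

import org.elasticsearch.common.Nullable;
import org.elasticsearch.persistent.AllocatedPersistentTask;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTaskState;
import org.elasticsearch.persistent.PersistentTasksExecutor;

// Hypothetical executor; the two-arg super constructor (task name, thread
// pool name) is assumed to match the version on this branch.
public class SketchTaskExecutor extends PersistentTasksExecutor<PersistentTaskParams> {

    public SketchTaskExecutor() {
        super("cluster:admin/sketch_task", "generic");
    }

    @Override
    protected void nodeOperation(AllocatedPersistentTask task, PersistentTaskParams params,
                                 @Nullable PersistentTaskState state) {
        // state is null on first allocation; otherwise it carries whatever the
        // task last persisted via AllocatedPersistentTask#updatePersistentTaskState.
        if (state != null) {
            // resume from the persisted state
        }
        task.markAsCompleted();
    }
}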
Some files were not shown because too many files have changed in this diff.