Merge branch 'master' into feature/multi_cluster_search

Simon Willnauer 2016-12-20 14:32:09 +01:00
commit 7be3af1123
1100 changed files with 22382 additions and 18942 deletions

.gitignore

@@ -38,11 +38,14 @@ dependency-reduced-pom.xml
# osx stuff
.DS_Store
# default folders in which create_bwc_index.py expects to find old es versions
/backwards
/dev-tools/backwards
# needed in case docs build is run...maybe we can configure doc build to generate files under build?
html_docs
# random old stuff that we should look at the necessity of...
/tmp/
backwards/
eclipse-build


@@ -474,10 +474,10 @@ gradle run --debug-jvm
== Building with extra plugins
Additional plugins may be built alongside elasticsearch, where their
dependency on elasticsearch will be substituted with the local elasticsearch
build. To add your plugin, create a directory called x-plugins as a sibling
of elasticsearch. Checkout your plugin underneath x-plugins and the build
will automatically pick it up. You can verify the plugin is included as part
of the build by checking the projects of the build.
build. To add your plugin, create a directory called elasticsearch-extra as
a sibling of elasticsearch. Check out your plugin underneath elasticsearch-extra
and the build will automatically pick it up. You can verify the plugin is
included as part of the build by checking the projects of the build.
---------------------------------------------------------------------------
gradle projects


@@ -24,7 +24,7 @@ buildscript {
}
}
dependencies {
classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.3'
classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.4'
}
}
@@ -44,9 +44,8 @@ task test(type: Test, overwrite: true)
dependencies {
compile("org.elasticsearch:elasticsearch:${version}") {
// JMH ships with the conflicting version 4.6 (JMH will not update this dependency as it is Java 6 compatible and joptsimple is one
// of the most recent compatible version). This prevents us from using jopt-simple in benchmarks (which should be ok) but allows us
// to invoke the JMH uberjar as usual.
// JMH ships with the conflicting version 4.6. This prevents us from using jopt-simple in benchmarks (which should be ok) but allows
// us to invoke the JMH uberjar as usual.
exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
}
compile "org.openjdk.jmh:jmh-core:$versions.jmh"


@@ -161,10 +161,20 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
if (false == test.continued) {
current.println('---')
current.println("\"line_$test.start\":")
/* The Elasticsearch test runner doesn't support the warnings
* construct unless you output this skip. Since we don't know
* if this snippet will use the warnings construct we emit this
* warning every time. */
current.println(" - skip:")
current.println(" features: ")
current.println(" - warnings")
}
if (test.skipTest) {
current.println(" - skip:")
current.println(" features: always_skip")
if (test.continued) {
throw new InvalidUserDataException("Continued snippets "
+ "can't be skipped")
}
current.println(" - always_skip")
current.println(" reason: $test.skipTest")
}
if (test.setup != null) {


@@ -59,11 +59,16 @@ public class ForbiddenPatternsTask extends DefaultTask {
filesFilter.exclude('**/*.png')
// add mandatory rules
patterns.put('nocommit', /nocommit/)
patterns.put('nocommit', /nocommit|NOCOMMIT/)
patterns.put('nocommit should be all lowercase or all uppercase',
/((?i)nocommit)(?<!(nocommit|NOCOMMIT))/)
patterns.put('tab', /\t/)
if (System.getProperty('build.snapshot', 'true').equals('false')) {
patterns.put('norelease', /norelease/)
patterns.put('norelease', /norelease|NORELEASE/)
}
patterns.put('norelease should be all lowercase or all uppercase',
/((?i)norelease)(?<!(norelease|NORELEASE))/)
inputs.property("excludes", filesFilter.excludes)
inputs.property("rules", patterns)


@@ -268,6 +268,7 @@ class ClusterFormationTasks {
static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, NodeInfo seedNode) {
Map esConfig = [
'cluster.name' : node.clusterName,
'node.name' : "node-" + node.nodeNum,
'pidfile' : node.pidFile,
'path.repo' : "${node.sharedDir}/repo",
'path.shared_data' : "${node.sharedDir}/",


@@ -216,7 +216,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANatives.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JVMCheck.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JarHell.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Seccomp.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
@@ -350,7 +349,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]plain[/\\]ParentChildIndexFieldData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]plain[/\\]SortedNumericDVIndexFieldData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]fielddata[/\\]plain[/\\]SortedSetDVOrdinalsIndexFieldData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]get[/\\]GetResult.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]get[/\\]ShardGetService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentFieldMappers.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentMapper.java" checks="LineLength" />
@@ -1002,7 +1000,6 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESSingleNodeTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ExternalNode.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ExternalTestCluster.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />


@@ -21,4 +21,4 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# benchmark dependencies
jmh = 1.15
jmh = 1.17.3


@@ -59,6 +59,12 @@ final class RequestLogger {
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
"] returned [" + httpResponse.getStatusLine() + "]");
}
if (logger.isWarnEnabled()) {
Header[] warnings = httpResponse.getHeaders("Warning");
if (warnings != null && warnings.length > 0) {
logger.warn(buildWarningMessage(request, host, warnings));
}
}
if (tracer.isTraceEnabled()) {
String requestLine;
try {
@@ -97,6 +103,18 @@
}
}
static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[] warnings) {
StringBuilder message = new StringBuilder("request [").append(request.getMethod()).append(" ").append(host)
.append(getUri(request.getRequestLine())).append("] returned ").append(warnings.length).append(" warnings: ");
for (int i = 0; i < warnings.length; i++) {
if (i > 0) {
message.append(",");
}
message.append("[").append(warnings[i].getValue()).append("]");
}
return message.toString();
}
/**
* Creates curl output for given request
*/
@@ -134,7 +152,7 @@ final class RequestLogger {
httpResponse.setEntity(entity);
ContentType contentType = ContentType.get(entity);
Charset charset = StandardCharsets.UTF_8;
if (contentType != null) {
if (contentType != null && contentType.getCharset() != null) {
charset = contentType.getCharset();
}
try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) {


@@ -23,6 +23,10 @@ package org.elasticsearch.client;
* Listener to be provided when calling async performRequest methods provided by {@link RestClient}.
* Those methods that do accept a listener will return immediately, execute asynchronously, and notify
* the listener whenever the request yielded a response, or failed with an exception.
*
* <p>
* Note that it is <strong>not</strong> safe to call {@link RestClient#close()} from either of these
* callbacks.
*/
public interface ResponseListener {
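
For context, a minimal sketch of supplying such a listener (method names as in the 5.x low-level client; treat the exact signatures as an assumption):

import java.util.Collections;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

public class ResponseListenerSketch {
    public static void main(String[] args) {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
        restClient.performRequestAsync("GET", "/", Collections.emptyMap(), new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                System.out.println(response.getStatusLine());
                // per the note above, do NOT call restClient.close() here
            }
            @Override
            public void onFailure(Exception exception) {
                exception.printStackTrace();
                // likewise, never close the client from this callback
            }
        });
        // close restClient later, from a thread outside these callbacks
    }
}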


@@ -37,7 +37,7 @@ import java.util.Objects;
*/
public final class RestClientBuilder {
public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000;
public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10;
@@ -185,7 +185,8 @@ public final class RestClientBuilder {
private CloseableHttpAsyncClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom()
.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);
if (requestConfigCallback != null) {
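
Callers that depended on the old 10 second socket timeout can pin it explicitly through the existing callback hook; a hedged sketch (assuming the 5.x setRequestConfigCallback API):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class SocketTimeoutSketch {
    public static void main(String[] args) {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setRequestConfigCallback(requestConfigBuilder ->
                        requestConfigBuilder.setSocketTimeout(10000)) // restore the old 10s value
                .build();
    }
}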


@@ -19,7 +19,7 @@
package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
@@ -29,10 +29,11 @@ import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NByteArrayEntity;
@@ -43,16 +44,16 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
public class RequestLoggerTests extends RestClientTestCase {
public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https");
String expectedEndpoint = "/index/type/_api";
URI uri;
if (randomBoolean()) {
@@ -60,46 +61,15 @@ public class RequestLoggerTests extends RestClientTestCase {
} else {
uri = new URI("index/type/_api");
}
HttpRequestBase request;
int requestType = RandomNumbers.randomIntBetween(getRandom(), 0, 7);
switch(requestType) {
case 0:
request = new HttpGetWithEntity(uri);
break;
case 1:
request = new HttpPost(uri);
break;
case 2:
request = new HttpPut(uri);
break;
case 3:
request = new HttpDeleteWithEntity(uri);
break;
case 4:
request = new HttpHead(uri);
break;
case 5:
request = new HttpTrace(uri);
break;
case 6:
request = new HttpOptions(uri);
break;
case 7:
request = new HttpPatch(uri);
break;
default:
throw new UnsupportedOperationException();
}
HttpUriRequest request = randomHttpRequest(uri);
String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean();
String requestBody = "{ \"field\": \"value\" }";
if (hasBody) {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
switch(randomIntBetween(0, 4)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
break;
@@ -112,6 +82,10 @@ public class RequestLoggerTests extends RestClientTestCase {
case 3:
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
break;
case 4:
// Evil entity without a charset
entity = new StringEntity(requestBody, (Charset) null);
break;
default:
throw new UnsupportedOperationException();
}
@@ -128,12 +102,12 @@
public void testTraceResponse() throws IOException {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
int statusCode = RandomNumbers.randomIntBetween(getRandom(), 200, 599);
int statusCode = randomIntBetween(200, 599);
String reasonPhrase = "REASON";
BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
String expected = "# " + statusLine.toString();
BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
int numHeaders = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
int numHeaders = randomIntBetween(0, 3);
for (int i = 0; i < numHeaders; i++) {
httpResponse.setHeader("header" + i, "value");
expected += "\n# header" + i + ": value";
@@ -146,11 +120,20 @@
expected += "\n# \"field\": \"value\"";
expected += "\n# }";
HttpEntity entity;
if (getRandom().nextBoolean()) {
switch(randomIntBetween(0, 2)) {
case 0:
entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
} else {
break;
case 1:
// test a non-repeatable entity
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
break;
case 2:
// Evil entity without a charset
entity = new StringEntity(responseBody, (Charset) null);
break;
default:
throw new UnsupportedOperationException();
}
httpResponse.setEntity(entity);
}
@@ -162,4 +145,46 @@
assertThat(body, equalTo(responseBody));
}
}
public void testResponseWarnings() throws Exception {
HttpHost host = new HttpHost("localhost", 9200);
HttpUriRequest request = randomHttpRequest(new URI("/index/type/_api"));
int numWarnings = randomIntBetween(1, 5);
StringBuilder expected = new StringBuilder("request [").append(request.getMethod()).append(" ").append(host)
.append("/index/type/_api] returned ").append(numWarnings).append(" warnings: ");
Header[] warnings = new Header[numWarnings];
for (int i = 0; i < numWarnings; i++) {
String warning = "this is warning number " + i;
warnings[i] = new BasicHeader("Warning", warning);
if (i > 0) {
expected.append(",");
}
expected.append("[").append(warning).append("]");
}
assertEquals(expected.toString(), RequestLogger.buildWarningMessage(request, host, warnings));
}
private static HttpUriRequest randomHttpRequest(URI uri) {
int requestType = randomIntBetween(0, 7);
switch(requestType) {
case 0:
return new HttpGetWithEntity(uri);
case 1:
return new HttpPost(uri);
case 2:
return new HttpPut(uri);
case 3:
return new HttpDeleteWithEntity(uri);
case 4:
return new HttpHead(uri);
case 5:
return new HttpTrace(uri);
case 6:
return new HttpOptions(uri);
case 7:
return new HttpPatch(uri);
default:
throw new UnsupportedOperationException();
}
}
}


@@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -43,6 +44,8 @@ import java.util.stream.Collectors;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
/**
* A base class for all elasticsearch exceptions.
@@ -71,6 +74,14 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";
private static final String RESOURCE_HEADER_ID_KEY = "es.resource.id";
private static final String TYPE = "type";
private static final String REASON = "reason";
private static final String CAUSED_BY = "caused_by";
private static final String STACK_TRACE = "stack_trace";
private static final String HEADER = "header";
private static final String ERROR = "error";
private static final String ROOT_CAUSE = "root_cause";
private static final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> ID_TO_SUPPLIER;
private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;
private final Map<String, List<String>> headers = new HashMap<>();
@@ -247,8 +258,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
if (ex != this) {
toXContent(builder, params, this);
} else {
builder.field("type", getExceptionName());
builder.field("reason", getMessage());
builder.field(TYPE, getExceptionName());
builder.field(REASON, getMessage());
for (String key : headers.keySet()) {
if (key.startsWith("es.")) {
List<String> values = headers.get(key);
@@ -258,7 +269,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
innerToXContent(builder, params);
renderHeader(builder, params);
if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) {
builder.field("stack_trace", ExceptionsHelper.stackTrace(this));
builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(this));
}
}
return builder;
@@ -277,7 +288,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
protected void causeToXContent(XContentBuilder builder, Params params) throws IOException {
final Throwable cause = getCause();
if (cause != null && params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, REST_EXCEPTION_SKIP_CAUSE_DEFAULT) == false) {
builder.field("caused_by");
builder.field(CAUSED_BY);
builder.startObject();
toXContent(builder, params, cause);
builder.endObject();
@@ -291,7 +302,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
continue;
}
if (hasHeader == false) {
builder.startObject("header");
builder.startObject(HEADER);
hasHeader = true;
}
List<String> values = headers.get(key);
@@ -324,20 +335,75 @@
if (ex instanceof ElasticsearchException) {
((ElasticsearchException) ex).toXContent(builder, params);
} else {
builder.field("type", getExceptionName(ex));
builder.field("reason", ex.getMessage());
builder.field(TYPE, getExceptionName(ex));
builder.field(REASON, ex.getMessage());
if (ex.getCause() != null) {
builder.field("caused_by");
builder.field(CAUSED_BY);
builder.startObject();
toXContent(builder, params, ex.getCause());
builder.endObject();
}
if (params.paramAsBoolean(REST_EXCEPTION_SKIP_STACK_TRACE, REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT) == false) {
builder.field("stack_trace", ExceptionsHelper.stackTrace(ex));
builder.field(STACK_TRACE, ExceptionsHelper.stackTrace(ex));
}
}
}
/**
* Generate an {@link ElasticsearchException} from an {@link XContentParser}. This does not
* return the original exception type (e.g. NodeClosedException) but just wraps
* the type, the reason and the cause of the exception. It also recursively parses the
* tree structure of the cause, returning it as a tree structure of {@link ElasticsearchException}
* instances.
*/
public static ElasticsearchException fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String type = null, reason = null, stack = null;
ElasticsearchException cause = null;
Map<String, Object> headers = new HashMap<>();
do {
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
if (TYPE.equals(currentFieldName)) {
type = parser.text();
} else if (REASON.equals(currentFieldName)) {
reason = parser.text();
} else if (STACK_TRACE.equals(currentFieldName)) {
stack = parser.text();
} else {
// Everything else is treated as a header
headers.put(currentFieldName, parser.text());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (CAUSED_BY.equals(currentFieldName)) {
cause = fromXContent(parser);
} else if (HEADER.equals(currentFieldName)) {
headers.putAll(parser.map());
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
}
} while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME);
StringBuilder message = new StringBuilder("Elasticsearch exception [");
message.append(TYPE).append('=').append(type).append(", ");
message.append(REASON).append('=').append(reason);
if (stack != null) {
message.append(", ").append(STACK_TRACE).append('=').append(stack);
}
message.append(']');
ElasticsearchException e = new ElasticsearchException(message.toString(), cause);
for (Map.Entry<String, Object> header : headers.entrySet()) {
e.addHeader(header.getKey(), String.valueOf(header.getValue()));
}
return e;
}
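/* To make the wrapping behaviour concrete, a sketch of input and output (the JSON
 * below is a hypothetical REST error body; the messages follow the StringBuilder
 * logic above):
 *
 *   { "type": "node_closed_exception",
 *     "reason": "node closed",
 *     "caused_by": { "type": "io_exception", "reason": "disk failed" } }
 *
 * parses to an ElasticsearchException whose getMessage() is
 *   "Elasticsearch exception [type=node_closed_exception, reason=node closed]"
 * and whose getCause() carries
 *   "Elasticsearch exception [type=io_exception, reason=disk failed]".
 */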
/**
* Returns the root cause of this exception or multiple if different shards caused different exceptions
*/
@@ -809,9 +875,9 @@
}
public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException {
builder.startObject("error");
builder.startObject(ERROR);
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
builder.field("root_cause");
builder.field(ROOT_CAUSE);
builder.startArray();
for (ElasticsearchException rootCause : rootCauses) {
builder.startObject();


@@ -35,12 +35,6 @@ public class Version {
* values below 25 are for alpha builds (since 5.0), values above 25 and below 50 are beta builds, and values below 99 are RC builds, with 99
* indicating a release; the (internal) format of the id is there so we can easily do after/before checks on the id
*/
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_beta2_ID = 2000002;
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_rc1_ID = 2000051;
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_ID = 2000099;
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_1_ID = 2000199;
@@ -77,6 +71,8 @@
public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_2_4_2_ID = 2040299;
public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_2_4_3_ID = 2040399;
public static final Version V_2_4_3 = new Version(V_2_4_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -102,6 +98,8 @@
// no version constant for 5.1.0 due to inadvertent release
public static final int V_5_1_1_ID_UNRELEASED = 5010199;
public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_5_1_2_ID_UNRELEASED = 5010299;
public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_5_2_0_ID_UNRELEASED = 5020099;
public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
@@ -126,6 +124,8 @@
return V_6_0_0_alpha1_UNRELEASED;
case V_5_2_0_ID_UNRELEASED:
return V_5_2_0_UNRELEASED;
case V_5_1_2_ID_UNRELEASED:
return V_5_1_2_UNRELEASED;
case V_5_1_1_ID_UNRELEASED:
return V_5_1_1_UNRELEASED;
case V_5_0_3_ID_UNRELEASED:
@@ -150,6 +150,8 @@
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_4_3_ID:
return V_2_4_3;
case V_2_4_2_ID:
return V_2_4_2;
case V_2_4_1_ID:
@@ -186,12 +188,6 @@
return V_2_0_1;
case V_2_0_0_ID:
return V_2_0_0;
case V_2_0_0_rc1_ID:
return V_2_0_0_rc1;
case V_2_0_0_beta2_ID:
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
default:
return new Version(id, org.apache.lucene.util.Version.LATEST);
}
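
The constants above follow the id scheme described in the class comment; a minimal sketch of the encoding (a hypothetical helper, not a method of this class):

// major * 1000000 + minor * 10000 + revision * 100 + build, where build 99 marks
// a release, < 25 an alpha (since 5.0), 25-49 a beta, and 50-98 an RC.
static int versionId(int major, int minor, int revision, int build) {
    return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
}
// versionId(2, 4, 3, 99) == 2040399  // V_2_4_3_ID
// versionId(5, 1, 2, 99) == 5010299  // V_5_1_2_ID_UNRELEASED
// versionId(5, 0, 0, 1)  == 5000001  // V_5_0_0_alpha1_ID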


@@ -19,6 +19,8 @@
package org.elasticsearch.action;
import org.elasticsearch.common.CheckedConsumer;
import java.util.function.Consumer;
/**
@@ -45,7 +47,8 @@
* @param <Response> the type of the response
* @return a listener that listens for responses and invokes the consumer when received
*/
static <Response> ActionListener<Response> wrap(Consumer<Response> onResponse, Consumer<Exception> onFailure) {
static <Response> ActionListener<Response> wrap(CheckedConsumer<Response, ? extends Exception> onResponse,
Consumer<Exception> onFailure) {
return new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
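
The switch to CheckedConsumer lets the response handler throw checked exceptions, which wrap(...) funnels into the failure consumer. A hedged sketch of a call site (writeToDisk and logger are hypothetical):

ActionListener<GetResponse> listener = ActionListener.wrap(
        response -> writeToDisk(response),   // may throw IOException
        e -> logger.error("get failed", e)); // also invoked if writeToDisk throws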


@@ -24,8 +24,10 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
@@ -168,8 +170,6 @@ import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineTransportAction;
import org.elasticsearch.action.ingest.IngestActionFilter;
import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
@@ -201,6 +201,7 @@ import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
@@ -309,7 +310,7 @@ import org.elasticsearch.rest.action.search.RestExplainAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.search.RestSuggestAction;
import org.elasticsearch.threadpool.ThreadPool;
import static java.util.Collections.unmodifiableList;
import static java.util.Collections.unmodifiableMap;
@@ -319,6 +320,8 @@ import static java.util.Collections.unmodifiableMap;
*/
public class ActionModule extends AbstractModule {
private static final Logger logger = ESLoggerFactory.getLogger(ActionModule.class);
private final boolean transportClient;
private final Settings settings;
private final List<ActionPlugin> actionPlugins;
@@ -328,17 +331,28 @@ public class ActionModule extends AbstractModule {
private final DestructiveOperations destructiveOperations;
private final RestController restController;
public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
ClusterSettings clusterSettings, List<ActionPlugin> actionPlugins) {
public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
ClusterSettings clusterSettings, ThreadPool threadPool, List<ActionPlugin> actionPlugins) {
this.transportClient = transportClient;
this.settings = settings;
this.actionPlugins = actionPlugins;
actions = setupActions(actionPlugins);
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
actionFilters = setupActionFilters(actionPlugins);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
restController = new RestController(settings, headers);
UnaryOperator<RestHandler> restWrapper = null;
for (ActionPlugin plugin : actionPlugins) {
UnaryOperator<RestHandler> newRestWrapper = plugin.getRestHandlerWrapper(threadPool.getThreadContext());
if (newRestWrapper != null) {
logger.debug("Using REST wrapper from plugin " + plugin.getClass().getName());
if (restWrapper != null) {
throw new IllegalArgumentException("Cannot have more than one plugin implementing a REST wrapper");
}
restWrapper = newRestWrapper;
}
}
restController = new RestController(settings, headers, restWrapper);
}
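
At most one installed plugin may contribute a wrapper; a hedged sketch of a contributing plugin (class and behaviour are hypothetical, signatures as used above):

import java.util.function.UnaryOperator;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;

public class RestWrapperPluginSketch extends Plugin implements ActionPlugin {
    @Override
    public UnaryOperator<RestHandler> getRestHandlerWrapper(ThreadContext threadContext) {
        return handler -> (request, channel, client) -> {
            // e.g. inspect headers or threadContext here before delegating
            handler.handleRequest(request, channel, client);
        };
    }
}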
public Map<String, ActionHandler<?, ?>> getActions() {
@@ -460,20 +474,8 @@
return unmodifiableMap(actions.getRegistry());
}
private List<Class<? extends ActionFilter>> setupActionFilters(List<ActionPlugin> actionPlugins, boolean ingestEnabled) {
List<Class<? extends ActionFilter>> filters = new ArrayList<>();
if (transportClient == false) {
if (ingestEnabled) {
filters.add(IngestActionFilter.class);
} else {
filters.add(IngestProxyActionFilter.class);
}
}
for (ActionPlugin plugin : actionPlugins) {
filters.addAll(plugin.getActionFilters());
}
return unmodifiableList(filters);
private List<Class<? extends ActionFilter>> setupActionFilters(List<ActionPlugin> actionPlugins) {
return unmodifiableList(actionPlugins.stream().flatMap(p -> p.getActionFilters().stream()).collect(Collectors.toList()));
}
static Set<Class<? extends RestHandler>> setupRestHandlers(List<ActionPlugin> actionPlugins) {
@@ -547,7 +549,6 @@
registerRestHandler(handlers, RestMultiGetAction.class);
registerRestHandler(handlers, RestDeleteAction.class);
registerRestHandler(handlers, org.elasticsearch.rest.action.document.RestCountAction.class);
registerRestHandler(handlers, RestSuggestAction.class);
registerRestHandler(handlers, RestTermVectorsAction.class);
registerRestHandler(handlers, RestMultiTermVectorsAction.class);
registerRestHandler(handlers, RestBulkAction.class);


@@ -18,6 +18,7 @@
*/
package org.elasticsearch.action;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.WriteResponse;
@@ -34,6 +35,8 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Locale;
/**
@@ -185,8 +188,9 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
/**
* Gets the location of the written document as a string suitable for a {@code Location} header.
* @param routing any routing used in the request. If null the location doesn't include routing information.
*
*/
public String getLocation(@Nullable String routing) {
public String getLocation(@Nullable String routing) throws URISyntaxException {
// Absolute path for the location of the document. This should be allowed as of HTTP/1.1:
// https://tools.ietf.org/html/rfc7231#section-7.1.2
String index = getIndex();
@ -204,7 +208,9 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
if (routing != null) {
location.append(routingStart).append(routing);
}
return location.toString();
URI uri = new URI(location.toString());
return uri.toASCIIString();
}
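/* A sketch of the resulting Location values for index "twitter", type "tweet",
 * id "1" (the "?routing=" separator is an assumption about routingStart):
 *
 *   getLocation(null)    -> /twitter/tweet/1
 *   getLocation("user7") -> /twitter/tweet/1?routing=user7
 *
 * Round-tripping through java.net.URI percent-encodes non-ASCII ids, e.g. an id
 * of "café" now yields /twitter/tweet/caf%C3%A9.
 */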
@Override
@@ -214,7 +220,11 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
type = in.readString();
id = in.readString();
version = in.readZLong();
seqNo = in.readZLong();
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNo = in.readZLong();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
forcedRefresh = in.readBoolean();
result = Result.readFrom(in);
}
@@ -226,7 +236,9 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
out.writeString(type);
out.writeString(id);
out.writeZLong(version);
out.writeZLong(seqNo);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeZLong(seqNo);
}
out.writeBoolean(forcedRefresh);
result.writeTo(out);
}


@@ -73,7 +73,7 @@ public class NodeExplanation implements Writeable, ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
Decision.writeTo(nodeDecision, out);
nodeDecision.writeTo(out);
out.writeFloat(nodeWeight);
if (storeStatus == null) {
out.writeBoolean(false);
@@ -106,9 +106,11 @@
}
builder.endObject(); // end store
builder.field("final_decision", finalDecision.toString());
builder.field("final_explanation", finalExplanation.toString());
builder.field("final_explanation", finalExplanation);
builder.field("weight", nodeWeight);
builder.startArray("decisions");
nodeDecision.toXContent(builder, params);
builder.endArray();
}
builder.endObject(); // end node <uuid>
return builder;


@@ -34,8 +34,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.cluster.service.ClusterStateStatus;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@@ -46,6 +44,8 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.function.Predicate;
public class TransportClusterHealthAction extends TransportMasterNodeReadAction<ClusterHealthRequest, ClusterHealthResponse> {
private final GatewayAllocator gatewayAllocator;
@@ -142,19 +142,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
assert waitFor >= 0;
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
final ClusterServiceState observedState = observer.observedState();
final ClusterState state = observedState.getClusterState();
final ClusterState state = observer.observedState();
if (request.timeout().millis() == 0) {
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
return;
}
final int concreteWaitFor = waitFor;
final ClusterStateObserver.ChangePredicate validationPredicate = new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterServiceState newState) {
return newState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, newState.getClusterState(), concreteWaitFor);
}
};
final Predicate<ClusterState> validationPredicate = newState -> validateRequest(request, newState, concreteWaitFor);
final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() {
@Override
@@ -174,7 +168,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
listener.onResponse(response);
}
};
if (observedState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, state, concreteWaitFor)) {
if (validationPredicate.test(state)) {
stateListener.onNewClusterState(state);
} else {
observer.waitForNextChange(stateListener, validationPredicate, request.timeout());


@@ -105,7 +105,15 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
DiscoveryNode node = clusterService.state().nodes().get(request.getTaskId().getNodeId());
if (node == null) {
// Node is no longer part of the cluster! Try to look the task up from the results index.
getFinishedTaskFromIndex(thisTask, request, listener);
getFinishedTaskFromIndex(thisTask, request, ActionListener.wrap(listener::onResponse, e -> {
if (e instanceof ResourceNotFoundException) {
e = new ResourceNotFoundException(
"task [" + request.getTaskId() + "] belongs to the node [" + request.getTaskId().getNodeId()
+ "] which isn't part of the cluster and there is no record of the task",
e);
}
listener.onFailure(e);
}));
return;
}
GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
@@ -231,7 +239,7 @@
*/
void onGetFinishedTaskFromIndex(GetResponse response, ActionListener<GetTaskResponse> listener) throws IOException {
if (false == response.isExists()) {
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", response.getId()));
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", response.getId()));
return;
}
if (response.isSourceEmpty()) {


@@ -22,22 +22,20 @@ package org.elasticsearch.action.admin.cluster.repositories.put;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* Register repository request.
@@ -198,18 +196,8 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(XContentBuilder repositoryDefinition) {
return source(repositoryDefinition.bytes());
}
/**
* Parses repository definition.
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(Map repositoryDefinition) {
Map<String, Object> source = repositoryDefinition;
for (Map.Entry<String, Object> entry : source.entrySet()) {
public PutRepositoryRequest source(Map<String, Object> repositoryDefinition) {
for (Map.Entry<String, Object> entry : repositoryDefinition.entrySet()) {
String name = entry.getKey();
if (name.equals("type")) {
type(entry.getValue().toString());
@@ -217,64 +205,14 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
if (!(entry.getValue() instanceof Map)) {
throw new IllegalArgumentException("Malformed settings section, should include an inner object");
}
settings((Map<String, Object>) entry.getValue());
@SuppressWarnings("unchecked")
Map<String, Object> sub = (Map<String, Object>) entry.getValue();
settings(sub);
}
}
return this;
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(String repositoryDefinition) {
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
}
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(byte[] repositoryDefinition) {
return source(repositoryDefinition, 0, repositoryDefinition.length);
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(byte[] repositoryDefinition, int offset, int length) {
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(BytesReference repositoryDefinition) {
try (XContentParser parser = XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);


@@ -25,13 +25,11 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
@@ -41,10 +39,9 @@ import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
/**
@@ -357,17 +354,7 @@
* @param source snapshot definition
* @return this request
*/
public CreateSnapshotRequest source(XContentBuilder source) {
return source(source.bytes());
}
/**
* Parses snapshot definition.
*
* @param source snapshot definition
* @return this request
*/
public CreateSnapshotRequest source(Map source) {
public CreateSnapshotRequest source(Map<String, Object> source) {
for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
String name = entry.getKey();
if (name.equals("indices")) {
@@ -393,66 +380,6 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
return this;
}
/**
* Parses snapshot definition. JSON, YAML and properties formats are supported
*
* @param source snapshot definition
* @return this request
*/
public CreateSnapshotRequest source(String source) {
if (hasLength(source)) {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (Exception e) {
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
}
return this;
}
/**
* Parses snapshot definition. JSON, YAML and properties formats are supported
*
* @param source snapshot definition
* @return this request
*/
public CreateSnapshotRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* Parses snapshot definition. JSON, YAML and properties formats are supported
*
* @param source snapshot definition
* @param offset offset
* @param length length
* @return this request
*/
public CreateSnapshotRequest source(byte[] source, int offset, int length) {
if (length > 0) {
try (XContentParser parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
return this;
}
/**
* Parses snapshot definition. JSON, YAML and properties formats are supported
*
* @param source snapshot definition
* @return this request
*/
public CreateSnapshotRequest source(BytesReference source) {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse snapshot source", e);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -478,4 +405,9 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
out.writeBoolean(waitForCompletion);
out.writeBoolean(partial);
}
@Override
public String getDescription() {
return "snapshot [" + repository + ":" + snapshot + "]";
}
}


@@ -24,13 +24,11 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
@@ -39,10 +37,9 @@ import java.util.List;
import java.util.Map;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
/**
@@ -472,22 +469,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(XContentBuilder source) {
try {
return source(source.bytes());
} catch (Exception e) {
throw new IllegalArgumentException("Failed to build json for repository request", e);
}
}
/**
* Parses restore definition
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(Map source) {
for (Map.Entry<String, Object> entry : ((Map<String, Object>) source).entrySet()) {
public RestoreSnapshotRequest source(Map<String, Object> source) {
for (Map.Entry<String, Object> entry : source.entrySet()) {
String name = entry.getKey();
if (name.equals("indices")) {
if (entry.getValue() instanceof String) {
@@ -543,74 +526,6 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
return this;
}
/**
* Parses restore definition
* <p>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(String source) {
if (hasLength(source)) {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (Exception e) {
throw new IllegalArgumentException("failed to parse repository source [" + source + "]", e);
}
}
return this;
}
/**
* Parses restore definition
* <p>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* Parses restore definition
* <p>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @param offset offset
* @param length length
* @return this request
*/
public RestoreSnapshotRequest source(byte[] source, int offset, int length) {
if (length > 0) {
try (XContentParser parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse repository source", e);
}
}
return this;
}
/**
* Parses restore definition
* <p>
* JSON, YAML and properties formats are supported
*
* @param source restore definition
* @return this request
*/
public RestoreSnapshotRequest source(BytesReference source) {
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
return source(parser.mapOrdered());
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse template source", e);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -646,4 +561,10 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
writeSettingsToStream(indexSettings, out);
out.writeStringArray(ignoreIndexSettings);
}
@Override
public String getDescription() {
return "snapshot [" + repository + ":" + snapshot + "]";
}
}


@@ -101,10 +101,10 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
// When there is a master failure after a restore has been started, this listener might not be registered
// on the current master and as such it might miss some intermediary cluster states due to batching.
// Clean up listener in that case and acknowledge completion of restore operation to client.
clusterService.remove(this);
clusterService.removeListener(this);
listener.onResponse(new RestoreSnapshotResponse(null));
} else if (newEntry == null) {
clusterService.remove(this);
clusterService.removeListener(this);
ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards = prevEntry.shards();
assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state();
assert RestoreService.completed(shards) : "expected all restore entries to be completed";
@@ -121,7 +121,7 @@
}
};
clusterService.addLast(clusterStateListener);
clusterService.addListener(clusterStateListener);
} else {
listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo()));
}


@@ -86,8 +86,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
this.nodeStats = NodeStats.readNodeStats(in);
int size = in.readVInt();
shardsStats = new ShardStats[size];
for (size--; size >= 0; size--) {
shardsStats[size] = ShardStats.readShardStats(in);
for (int i = 0; i < size; i++) {
shardsStats[i] = ShardStats.readShardStats(in);
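// forward iteration keeps the array in the order the stats were written; the old backwards loop reversed it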
}
}


@@ -58,6 +58,6 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
@Override
public String toString() {
return "flush {" + super.toString() + "}";
return "flush {" + shardId + "}";
}
}


@@ -64,16 +64,6 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
return new ReplicaResult();
}
@Override
protected ClusterBlockLevel globalBlockLevel() {
return ClusterBlockLevel.METADATA_WRITE;
}
@Override
protected ClusterBlockLevel indexBlockLevel() {
return ClusterBlockLevel.METADATA_WRITE;
}
@Override
protected boolean shouldExecuteReplication(Settings settings) {
return true;


@@ -67,16 +67,6 @@ public class TransportShardRefreshAction
return new ReplicaResult();
}
@Override
protected ClusterBlockLevel globalBlockLevel() {
return ClusterBlockLevel.METADATA_WRITE;
}
@Override
protected ClusterBlockLevel indexBlockLevel() {
return ClusterBlockLevel.METADATA_WRITE;
}
@Override
protected boolean shouldExecuteReplication(Settings settings) {
return true;


@@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.admin.indices.rollover;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
@@ -26,16 +25,11 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.HashSet;
@@ -50,7 +44,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implements IndicesRequest {
public static ObjectParser<RolloverRequest, ParseFieldMatcherSupplier> PARSER =
public static final ObjectParser<RolloverRequest, ParseFieldMatcherSupplier> PARSER =
new ObjectParser<>("conditions", null);
static {
PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
@ -194,19 +188,6 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem
return createIndexRequest;
}
public void source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) {
PARSER.parse(parser, this, () -> ParseFieldMatcher.EMPTY);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse source for rollover index", e);
}
} else {
throw new ElasticsearchParseException("failed to parse content type for rollover index source");
}
}
/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.admin.indices.shrink;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
@ -26,15 +25,10 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Objects;
@ -46,7 +40,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements IndicesRequest {
public static ObjectParser<ShrinkRequest, ParseFieldMatcherSupplier> PARSER =
public static final ObjectParser<ShrinkRequest, ParseFieldMatcherSupplier> PARSER =
new ObjectParser<>("shrink_request", null);
static {
PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
@ -152,17 +146,4 @@ public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements
public void setWaitForActiveShards(final int waitForActiveShards) {
setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
public void source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) {
PARSER.parse(parser, this, () -> ParseFieldMatcher.EMPTY);
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse source for shrink index", e);
}
} else {
throw new ElasticsearchParseException("failed to parse content type for shrink index source");
}
}
}

View File

@ -46,6 +46,9 @@ import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.search.suggest.completion.CompletionStats;
import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Stream;
public class CommonStats implements Writeable, ToXContent {
@ -225,45 +228,19 @@ public class CommonStats implements Writeable, ToXContent {
}
public CommonStats(StreamInput in) throws IOException {
if (in.readBoolean()) {
docs = DocsStats.readDocStats(in);
}
if (in.readBoolean()) {
store = StoreStats.readStoreStats(in);
}
if (in.readBoolean()) {
indexing = IndexingStats.readIndexingStats(in);
}
if (in.readBoolean()) {
get = GetStats.readGetStats(in);
}
if (in.readBoolean()) {
search = SearchStats.readSearchStats(in);
}
if (in.readBoolean()) {
merge = MergeStats.readMergeStats(in);
}
if (in.readBoolean()) {
refresh = RefreshStats.readRefreshStats(in);
}
if (in.readBoolean()) {
flush = FlushStats.readFlushStats(in);
}
if (in.readBoolean()) {
warmer = WarmerStats.readWarmerStats(in);
}
if (in.readBoolean()) {
queryCache = QueryCacheStats.readQueryCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);
}
if (in.readBoolean()) {
completion = CompletionStats.readCompletionStats(in);
}
if (in.readBoolean()) {
segments = SegmentsStats.readSegmentsStats(in);
}
docs = in.readOptionalStreamable(DocsStats::new);
store = in.readOptionalStreamable(StoreStats::new);
indexing = in.readOptionalStreamable(IndexingStats::new);
get = in.readOptionalStreamable(GetStats::new);
search = in.readOptionalStreamable(SearchStats::new);
merge = in.readOptionalStreamable(MergeStats::new);
refresh = in.readOptionalStreamable(RefreshStats::new);
flush = in.readOptionalStreamable(FlushStats::new);
warmer = in.readOptionalStreamable(WarmerStats::new);
queryCache = in.readOptionalStreamable(QueryCacheStats::new);
fieldData = in.readOptionalStreamable(FieldDataStats::new);
completion = in.readOptionalStreamable(CompletionStats::new);
segments = in.readOptionalStreamable(SegmentsStats::new);
translog = in.readOptionalStreamable(TranslogStats::new);
requestCache = in.readOptionalStreamable(RequestCacheStats::new);
recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
@ -271,84 +248,19 @@ public class CommonStats implements Writeable, ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
if (docs == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
docs.writeTo(out);
}
if (store == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
store.writeTo(out);
}
if (indexing == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
indexing.writeTo(out);
}
if (get == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
get.writeTo(out);
}
if (search == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
search.writeTo(out);
}
if (merge == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
merge.writeTo(out);
}
if (refresh == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
refresh.writeTo(out);
}
if (flush == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
flush.writeTo(out);
}
if (warmer == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
warmer.writeTo(out);
}
if (queryCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
queryCache.writeTo(out);
}
if (fieldData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
fieldData.writeTo(out);
}
if (completion == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
completion.writeTo(out);
}
if (segments == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
segments.writeTo(out);
}
out.writeOptionalStreamable(docs);
out.writeOptionalStreamable(store);
out.writeOptionalStreamable(indexing);
out.writeOptionalStreamable(get);
out.writeOptionalStreamable(search);
out.writeOptionalStreamable(merge);
out.writeOptionalStreamable(refresh);
out.writeOptionalStreamable(flush);
out.writeOptionalStreamable(warmer);
out.writeOptionalStreamable(queryCache);
out.writeOptionalStreamable(fieldData);
out.writeOptionalStreamable(completion);
out.writeOptionalStreamable(segments);
out.writeOptionalStreamable(translog);
out.writeOptionalStreamable(requestCache);
out.writeOptionalStreamable(recoveryStats);
@ -590,53 +502,12 @@ public class CommonStats implements Writeable, ToXContent {
// note, requires a wrapping object
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (docs != null) {
docs.toXContent(builder, params);
}
if (store != null) {
store.toXContent(builder, params);
}
if (indexing != null) {
indexing.toXContent(builder, params);
}
if (get != null) {
get.toXContent(builder, params);
}
if (search != null) {
search.toXContent(builder, params);
}
if (merge != null) {
merge.toXContent(builder, params);
}
if (refresh != null) {
refresh.toXContent(builder, params);
}
if (flush != null) {
flush.toXContent(builder, params);
}
if (warmer != null) {
warmer.toXContent(builder, params);
}
if (queryCache != null) {
queryCache.toXContent(builder, params);
}
if (fieldData != null) {
fieldData.toXContent(builder, params);
}
if (completion != null) {
completion.toXContent(builder, params);
}
if (segments != null) {
segments.toXContent(builder, params);
}
if (translog != null) {
translog.toXContent(builder, params);
}
if (requestCache != null) {
requestCache.toXContent(builder, params);
}
if (recoveryStats != null) {
recoveryStats.toXContent(builder, params);
final Stream<ToXContent> stream = Arrays.stream(new ToXContent[] {
docs, store, indexing, get, search, merge, refresh, flush, warmer, queryCache,
fieldData, completion, segments, translog, requestCache, recoveryStats})
.filter(Objects::nonNull);
for (ToXContent toXContent : ((Iterable<ToXContent>)stream::iterator)) {
toXContent.toXContent(builder, params);
}
return builder;
}
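
A note on the pattern: the CommonStats refactor above replaces thirteen hand-rolled null checks per direction with the readOptionalStreamable/writeOptionalStreamable helpers, and collapses toXContent into a null-filtering Stream. The wire format underneath is just a boolean presence flag followed by the value. A minimal sketch with plain java.io types (MiniStreamable, OptionalIo, writeOptional and readOptional are illustrative names for this sketch, not the Elasticsearch API):
---------------------------------------------------------------------------
import java.io.*;
import java.util.function.Supplier;

// Streamable-like contract: a value writes itself and can read itself back.
interface MiniStreamable {
    void writeTo(DataOutput out) throws IOException;
    void readFrom(DataInput in) throws IOException;
}

final class OptionalIo {
    // Write a presence flag, then the value itself only if present.
    static void writeOptional(DataOutput out, MiniStreamable value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            value.writeTo(out);
        }
    }

    // Read the presence flag; allocate and populate a value only if one was written.
    static <T extends MiniStreamable> T readOptional(DataInput in, Supplier<T> ctor) throws IOException {
        if (in.readBoolean() == false) {
            return null;
        }
        T value = ctor.get();
        value.readFrom(in);
        return value;
    }
}
---------------------------------------------------------------------------
Each readOptionalStreamable(DocsStats::new) call in the diff follows this shape: the constructor reference supplies an empty instance and the helper populates it only when the presence flag was true.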

View File

@ -135,19 +135,13 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shards = new ShardStats[in.readVInt()];
for (int i = 0; i < shards.length; i++) {
shards[i] = ShardStats.readShardStats(in);
}
shards = in.readArray(ShardStats::readShardStats, (size) -> new ShardStats[size]);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shards.length);
for (ShardStats shard : shards) {
shard.writeTo(out);
}
out.writeArray(shards);
}
@Override
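
IndicesStatsResponse swaps its manual length-prefixed loops for readArray/writeArray. In sketch form, such helpers pair a size prefix with a per-element reader lambda and an array constructor reference (ArrayIo and its nested interfaces are assumptions for this sketch; the real protocol also uses a variable-length int for the size rather than a fixed one):
---------------------------------------------------------------------------
import java.io.*;
import java.util.function.IntFunction;

final class ArrayIo {
    // Matches the shape of a reader lambda such as ShardStats::readShardStats.
    interface Reader<T> {
        T read(DataInput in) throws IOException;
    }

    interface Writer {
        void writeTo(DataOutput out) throws IOException;
    }

    // Size prefix first, then each element in order.
    static <T extends Writer> void writeArray(DataOutput out, T[] values) throws IOException {
        out.writeInt(values.length);
        for (T value : values) {
            value.writeTo(out);
        }
    }

    // Read the size, build the array via the supplied constructor, fill it.
    static <T> T[] readArray(DataInput in, Reader<T> reader, IntFunction<T[]> arrayCtor) throws IOException {
        int size = in.readInt();
        T[] values = arrayCtor.apply(size);
        for (int i = 0; i < size; i++) {
            values[i] = reader.read(in);
        }
        return values;
    }
}
---------------------------------------------------------------------------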

View File

@ -19,11 +19,13 @@
package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.engine.CommitStats;
@ -32,7 +34,7 @@ import org.elasticsearch.index.shard.ShardPath;
import java.io.IOException;
public class ShardStats implements Streamable, ToXContent {
public class ShardStats implements Streamable, Writeable, ToXContent {
private ShardRouting shardRouting;
private CommonStats commonStats;
@Nullable
@ -102,7 +104,9 @@ public class ShardStats implements Streamable, ToXContent {
statePath = in.readString();
dataPath = in.readString();
isCustomDataPath = in.readBoolean();
seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
}
}
@Override
@ -113,7 +117,9 @@ public class ShardStats implements Streamable, ToXContent {
out.writeString(statePath);
out.writeString(dataPath);
out.writeBoolean(isCustomDataPath);
out.writeOptionalWriteable(seqNoStats);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeOptionalWriteable(seqNoStats);
}
}
@Override
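
ShardStats now gates seqNoStats on the stream version so that a 5.x peer never receives bytes it cannot parse. The read and write paths must branch on the same version constant, otherwise the stream desynchronizes. A toy illustration with integer version ids (every name here is made up for the sketch):
---------------------------------------------------------------------------
import java.io.*;

// Toy wire format: a required id plus a field that only exists from version 2 on.
final class VersionedMessage {
    static final int V_2 = 2; // hypothetical version that introduced 'extra'

    long id;
    Long extra; // nullable, optional on the wire

    void writeTo(DataOutput out, int peerVersion) throws IOException {
        out.writeLong(id);
        if (peerVersion >= V_2) {
            // Presence flag plus value, but only for peers that know the field.
            out.writeBoolean(extra != null);
            if (extra != null) {
                out.writeLong(extra);
            }
        }
        // Older peers get the pre-V_2 layout and simply never see 'extra'.
    }

    void readFrom(DataInput in, int peerVersion) throws IOException {
        id = in.readLong();
        if (peerVersion >= V_2) {
            extra = in.readBoolean() ? in.readLong() : null;
        } else {
            extra = null; // the field did not exist on the sender
        }
    }
}
---------------------------------------------------------------------------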

View File

@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@ -48,9 +47,10 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -73,6 +73,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
* the one with the least casts.
*/
final List<DocWriteRequest> requests = new ArrayList<>();
private final Set<String> indices = new HashSet<>();
List<Object> payloads = null;
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@ -114,6 +115,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
} else {
throw new IllegalArgumentException("No support for request [" + request + "]");
}
indices.add(request.index());
return this;
}
@ -145,6 +147,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
addPayload(payload);
// lack of source is validated in validate() method
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
@ -172,6 +175,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
if (request.script() != null) {
sizeInBytes += request.script().getIdOrCode().length() * 2;
}
indices.add(request.index());
return this;
}
@ -187,6 +191,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
requests.add(request);
addPayload(payload);
sizeInBytes += REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
@ -394,8 +399,10 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict)
.version(version).versionType(versionType)
.routing(routing)
.parent(parent)
.fromXContent(data.slice(from, nextMarker - from));
.parent(parent);
try (XContentParser sliceParser = xContent.createParser(data.slice(from, nextMarker - from))) {
updateRequest.fromXContent(sliceParser);
}
if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
@ -548,4 +555,10 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
refreshPolicy.writeTo(out);
timeout.writeTo(out);
}
@Override
public String getDescription() {
return "requests[" + requests.size() + "], indices[" + Strings.collectionToDelimitedString(indices, ", ") + "]";
}
}
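
Each BulkRequest#add overload now records the target index, so getDescription can summarize a task cheaply instead of re-walking every item. A minimal sketch of that bookkeeping (MiniBulk is an illustrative stand-in):
---------------------------------------------------------------------------
import java.util.*;

// Collect per-item metadata eagerly so the summary is cheap to produce later.
final class MiniBulk {
    private final List<String> requests = new ArrayList<>();
    private final Set<String> indices = new HashSet<>();

    MiniBulk add(String index, String payload) {
        requests.add(payload);
        indices.add(index); // duplicates collapse; order is not significant here
        return this;
    }

    // Mirrors the shape of BulkRequest#getDescription in the diff.
    String getDescription() {
        return "requests[" + requests.size() + "], indices[" + String.join(", ", indices) + "]";
    }
}
---------------------------------------------------------------------------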

View File

@ -100,6 +100,11 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
return b.toString();
}
@Override
public String getDescription() {
return "requests[" + items.length + "], index[" + index + "]";
}
@Override
public void onRetry() {
for (BulkItemRequest item : items) {

View File

@ -142,7 +142,9 @@ public class Retry {
assert backoff.hasNext();
TimeValue next = backoff.next();
logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry)));
Runnable retry = () -> this.execute(bulkRequestForRetry);
retry = client.threadPool().getThreadContext().preserveContext(retry);
scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, retry);
}
private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
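
The Retry fix wraps the delayed runnable so that the thread context active at scheduling time is reinstated when the retry runs on a pool thread, and restored afterwards. A self-contained sketch of the capture-and-restore idiom using a plain ThreadLocal (the real ThreadContext carries request headers, but the mechanics are the same; all names below are illustrative):
---------------------------------------------------------------------------
import java.util.concurrent.*;

final class ContextPreservingDemo {
    private static final ThreadLocal<String> CONTEXT = new ThreadLocal<>();

    // Capture the caller's context now; re-install it around the delayed task.
    static Runnable preserveContext(Runnable inner) {
        final String captured = CONTEXT.get();
        return () -> {
            String previous = CONTEXT.get();
            CONTEXT.set(captured);
            try {
                inner.run();
            } finally {
                CONTEXT.set(previous); // do not leak the captured context to pool threads
            }
        };
    }

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        CONTEXT.set("request-42");
        Runnable retry = preserveContext(() -> System.out.println("retrying with " + CONTEXT.get()));
        pool.schedule(retry, 10, TimeUnit.MILLISECONDS); // prints "retrying with request-42"
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.SECONDS);
    }
}
---------------------------------------------------------------------------
Without the wrapper, the retry would run with whatever context happens to be on the scheduler thread at that moment.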

View File

@ -19,8 +19,12 @@
package org.elasticsearch.action.bulk;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
@ -30,6 +34,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.IngestActionForwarder;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.HandledTransportAction;
@ -48,14 +53,15 @@ import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -74,35 +80,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private final AutoCreateIndex autoCreateIndex;
private final boolean allowIdGeneration;
private final ClusterService clusterService;
private final IngestService ingestService;
private final TransportShardBulkAction shardBulkAction;
private final TransportCreateIndexAction createIndexAction;
private final LongSupplier relativeTimeProvider;
private final IngestActionForwarder ingestForwarder;
@Inject
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, IngestService ingestService,
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
this(settings, threadPool, transportService, clusterService,
this(settings, threadPool, transportService, clusterService, ingestService,
shardBulkAction, createIndexAction,
actionFilters, indexNameExpressionResolver,
autoCreateIndex,
System::nanoTime);
}
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, IngestService ingestService,
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
Objects.requireNonNull(relativeTimeProvider);
this.clusterService = clusterService;
this.ingestService = ingestService;
this.shardBulkAction = shardBulkAction;
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;
this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
this.relativeTimeProvider = relativeTimeProvider;
this.ingestForwarder = new IngestActionForwarder(transportService);
clusterService.addStateApplier(this.ingestForwarder);
}
@Override
@ -112,6 +124,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
@Override
protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
if (bulkRequest.hasIndexRequestsWithPipelines()) {
if (clusterService.localNode().isIngestNode()) {
processBulkIndexIngestRequest(task, bulkRequest, listener);
} else {
ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener);
}
return;
}
final long startTime = relativeTime();
final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
@ -376,4 +397,131 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
return relativeTimeProvider.getAsLong();
}
void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListener<BulkResponse> listener) {
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]",
indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {
logger.error("failed to execute pipeline for a bulk request", exception);
listener.onFailure(exception);
} else {
long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
if (bulkRequest.requests().isEmpty()) {
// at this stage, the transport bulk action can't deal with a bulk request with no requests,
// so we stop and send an empty response back to the client.
// (this will happen if pre-processing all items in the bulk failed)
actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0));
} else {
doExecute(task, bulkRequest, actionListener);
}
}
});
}
static final class BulkRequestModifier implements Iterator<DocWriteRequest> {
final BulkRequest bulkRequest;
final SparseFixedBitSet failedSlots;
final List<BulkItemResponse> itemResponses;
int currentSlot = -1;
int[] originalSlots;
BulkRequestModifier(BulkRequest bulkRequest) {
this.bulkRequest = bulkRequest;
this.failedSlots = new SparseFixedBitSet(bulkRequest.requests().size());
this.itemResponses = new ArrayList<>(bulkRequest.requests().size());
}
@Override
public DocWriteRequest next() {
return bulkRequest.requests().get(++currentSlot);
}
@Override
public boolean hasNext() {
return (currentSlot + 1) < bulkRequest.requests().size();
}
BulkRequest getBulkRequest() {
if (itemResponses.isEmpty()) {
return bulkRequest;
} else {
BulkRequest modifiedBulkRequest = new BulkRequest();
modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy());
modifiedBulkRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
modifiedBulkRequest.timeout(bulkRequest.timeout());
int slot = 0;
List<DocWriteRequest> requests = bulkRequest.requests();
originalSlots = new int[requests.size()]; // oversize, but that's ok
for (int i = 0; i < requests.size(); i++) {
DocWriteRequest request = requests.get(i);
if (failedSlots.get(i) == false) {
modifiedBulkRequest.add(request);
originalSlots[slot++] = i;
}
}
return modifiedBulkRequest;
}
}
ActionListener<BulkResponse> wrapActionListenerIfNeeded(long ingestTookInMillis, ActionListener<BulkResponse> actionListener) {
if (itemResponses.isEmpty()) {
return ActionListener.wrap(
response -> actionListener.onResponse(
new BulkResponse(response.getItems(), response.getTookInMillis(), ingestTookInMillis)),
actionListener::onFailure);
} else {
return new IngestBulkResponseListener(ingestTookInMillis, originalSlots, itemResponses, actionListener);
}
}
void markCurrentItemAsFailed(Exception e) {
IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(currentSlot);
// We hit an error during preprocessing a request, so we:
// 1) Remember the request item slot from the bulk, so that when we're done processing all requests we know what failed
// 2) Add a bulk item failure for this request
// 3) Continue with the next request in the bulk.
failedSlots.set(currentSlot);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e);
itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure));
}
}
static final class IngestBulkResponseListener implements ActionListener<BulkResponse> {
private final long ingestTookInMillis;
private final int[] originalSlots;
private final List<BulkItemResponse> itemResponses;
private final ActionListener<BulkResponse> actionListener;
IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
this.ingestTookInMillis = ingestTookInMillis;
this.itemResponses = itemResponses;
this.actionListener = actionListener;
this.originalSlots = originalSlots;
}
@Override
public void onResponse(BulkResponse response) {
BulkItemResponse[] items = response.getItems();
for (int i = 0; i < items.length; i++) {
itemResponses.add(originalSlots[i], response.getItems()[i]);
}
actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), response.getTookInMillis(), ingestTookInMillis));
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
}
}
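
BulkRequestModifier is slot bookkeeping: mark items that failed ingest preprocessing in a bitset, build a reduced request while recording each survivor's original position, then splice the shard responses back into the caller's original order. A compact demonstration of that arithmetic with stand-in types:
---------------------------------------------------------------------------
import java.util.*;

final class SlotRemapDemo {
    public static void main(String[] args) {
        List<String> original = Arrays.asList("a", "b", "c", "d");
        BitSet failed = new BitSet(original.size());
        failed.set(1); // pretend item "b" failed preprocessing

        // Build the reduced request and record each survivor's original slot.
        List<String> reduced = new ArrayList<>();
        int[] originalSlots = new int[original.size()]; // oversize, like the diff
        int slot = 0;
        for (int i = 0; i < original.size(); i++) {
            if (failed.get(i) == false) {
                reduced.add(original.get(i));
                originalSlots[slot++] = i;
            }
        }

        // Pretend the reduced request produced one response per surviving item,
        // then splice them back so the result lines up with the original order.
        String[] responses = new String[original.size()];
        responses[1] = "FAILED(b)"; // the preprocessing failure recorded earlier
        for (int i = 0; i < reduced.size(); i++) {
            responses[originalSlots[i]] = "OK(" + reduced.get(i) + ")";
        }
        System.out.println(Arrays.toString(responses)); // [OK(a), FAILED(b), OK(c), OK(d)]
    }
}
---------------------------------------------------------------------------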

View File

@ -150,6 +150,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final long version = indexResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.setSeqNo(indexResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
@ -173,6 +174,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(deleteResult.getVersion());
deleteRequest.setSeqNo(deleteResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
deleteResult.getVersion(), deleteResult.isFound());
@ -182,6 +184,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}
// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
if (operationResult == null) { // in case of noop update operation
@ -282,6 +285,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final long version = updateOperationResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.setSeqNo(updateOperationResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
}
break;
@ -292,6 +296,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(updateOperationResult.getVersion());
deleteRequest.setSeqNo(updateOperationResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
}
break;
@ -342,6 +347,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
break;
}
assert (replicaRequest.request() instanceof IndexRequest
&& ((IndexRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) ||
(replicaRequest.request() instanceof DeleteRequest
&& ((DeleteRequest) replicaRequest.request()).getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO);
// successful operation
break; // out of retry loop
} else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) {
@ -364,10 +373,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult = executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), replica);
operationResult = executeIndexRequestOnReplica((IndexRequest) docWriteRequest, replica);
break;
case DELETE:
operationResult = executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), replica);
operationResult = executeDeleteRequestOnReplica((DeleteRequest) docWriteRequest, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.delete;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
@ -39,7 +40,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@ -125,11 +125,14 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
protected WritePrimaryResult shardOperationOnPrimary(DeleteRequest request, IndexShard primary) throws Exception {
final Engine.DeleteResult result = executeDeleteRequestOnPrimary(request, primary);
final DeleteResponse response;
final DeleteRequest replicaRequest;
if (result.hasFailure() == false) {
// update the request with the version so it will go to the replicas
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
request.version(result.getVersion());
request.setSeqNo(result.getSeqNo());
assert request.versionType().validateVersionForWrites(request.version());
replicaRequest = request;
response = new DeleteResponse(
primary.shardId(),
request.type(),
@ -139,8 +142,9 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
result.isFound());
} else {
response = null;
replicaRequest = null;
}
return new WritePrimaryResult(request, response, result.getTranslogLocation(), result.getFailure(), primary);
return new WritePrimaryResult(replicaRequest, response, result.getTranslogLocation(), result.getFailure(), primary);
}
@Override
@ -156,8 +160,8 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
}
public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) {
final Engine.Delete delete =
replica.prepareDeleteOnReplica(request.type(), request.id(), request.seqNo(), request.version(), request.versionType());
final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
request.getSeqNo(), request.primaryTerm(), request.version(), request.versionType());
return replica.delete(delete);
}
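
TransportDeleteAction now derives a separate replicaRequest that stays null when the primary operation failed, rather than forwarding a half-updated request. The shape of that success/failure split, reduced to a stand-alone sketch (all types are stand-ins):
---------------------------------------------------------------------------
// Promote a request to a replica request only when the primary succeeded,
// propagating the version the primary decided on; otherwise replicate nothing.
final class PrimaryResultDemo {
    static final class Result {
        final boolean failed;
        final long version;
        Result(boolean failed, long version) { this.failed = failed; this.version = version; }
    }

    static final class Request {
        long version;
    }

    static Request toReplicaRequest(Request request, Result result) {
        if (result.failed) {
            return null; // nothing to replicate; the failure is reported instead
        }
        request.version = result.version; // replicas must apply the primary's decision
        return request;
    }
}
---------------------------------------------------------------------------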

View File

@ -20,8 +20,10 @@
package org.elasticsearch.action.fieldstats;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -45,9 +47,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
private long sumTotalTermFreq;
private boolean isSearchable;
private boolean isAggregatable;
private boolean hasMinMax;
protected T minValue;
protected T maxValue;
/**
* Builds a FieldStats where the min and max values are not available for the field.
* @param type The native type of this FieldStats
* @param maxDoc Max number of docs
* @param docCount the number of documents that have at least one term for this field,
* or -1 if this information isn't available for this field.
* @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
* or -1 if this information isn't available for this field.
* @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
* or -1 if this measure isn't available for this field.
* @param isSearchable true if this field is searchable
* @param isAggregatable true if this field is aggregatable
*/
FieldStats(byte type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
this.type = type;
this.maxDoc = maxDoc;
this.docCount = docCount;
this.sumDocFreq = sumDocFreq;
this.sumTotalTermFreq = sumTotalTermFreq;
this.isSearchable = isSearchable;
this.isAggregatable = isAggregatable;
this.hasMinMax = false;
}
/**
* Builds a FieldStats with the min and max values for the field.
* @param type The native type of this FieldStats
* @param maxDoc Max number of docs
* @param docCount the number of documents that have at least one term for this field,
* or -1 if this information isn't available for this field.
* @param sumDocFreq the sum of {@link TermsEnum#docFreq()} for all terms in this field,
* or -1 if this information isn't available for this field.
* @param sumTotalTermFreq the sum of {@link TermsEnum#totalTermFreq} for all terms in this field,
* or -1 if this measure isn't available for this field.
* @param isSearchable true if this field is searchable
* @param isAggregatable true if this field is aggregatable
* @param minValue the minimum value indexed in this field
* @param maxValue the maximum value indexed in this field
*/
FieldStats(byte type,
long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
@ -60,6 +103,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
this.sumTotalTermFreq = sumTotalTermFreq;
this.isSearchable = isSearchable;
this.isAggregatable = isAggregatable;
this.hasMinMax = true;
this.minValue = minValue;
this.maxValue = maxValue;
}
@ -85,6 +129,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
}
/**
* @return true if min/max information is available for this field
*/
public boolean hasMinMax() {
return hasMinMax;
}
/**
* @return the total number of documents.
*
@ -216,7 +267,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
isAggregatable |= other.isAggregatable;
assert type == other.getType();
updateMinMax((T) other.minValue, (T) other.maxValue);
if (hasMinMax && other.hasMinMax) {
updateMinMax((T) other.minValue, (T) other.maxValue);
} else {
hasMinMax = false;
minValue = null;
maxValue = null;
}
}
private void updateMinMax(T min, T max) {
@ -241,7 +298,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq);
builder.field(SEARCHABLE_FIELD, isSearchable);
builder.field(AGGREGATABLE_FIELD, isAggregatable);
toInnerXContent(builder);
if (hasMinMax) {
toInnerXContent(builder);
}
builder.endObject();
return builder;
}
@ -262,7 +321,14 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
out.writeLong(sumTotalTermFreq);
out.writeBoolean(isSearchable);
out.writeBoolean(isAggregatable);
writeMinMax(out);
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
out.writeBoolean(hasMinMax);
if (hasMinMax) {
writeMinMax(out);
}
} else {
writeMinMax(out);
}
}
protected abstract void writeMinMax(StreamOutput out) throws IOException;
@ -272,6 +338,9 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
* otherwise <code>false</code> is returned
*/
public boolean match(IndexConstraint constraint) {
if (hasMinMax == false) {
return false;
}
int cmp;
T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
if (constraint.getProperty() == IndexConstraint.Property.MIN) {
@ -310,6 +379,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
if (isSearchable != that.isSearchable) return false;
if (isAggregatable != that.isAggregatable) return false;
if (hasMinMax != that.hasMinMax) return false;
if (hasMinMax == false) {
return true;
}
if (!minValue.equals(that.minValue)) return false;
return maxValue.equals(that.maxValue);
@ -318,10 +391,16 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public int hashCode() {
return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
minValue, maxValue);
hasMinMax, minValue, maxValue);
}
public static class Long extends FieldStats<java.lang.Long> {
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
long minValue, long maxValue) {
@ -357,6 +436,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
public static class Double extends FieldStats<java.lang.Double> {
public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
}
public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
double minValue, double maxValue) {
@ -397,6 +481,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
public static class Date extends FieldStats<java.lang.Long> {
private FormatDateTimeFormatter formatter;
public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable);
this.formatter = null;
}
public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
FormatDateTimeFormatter formatter,
@ -439,23 +529,27 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
Date that = (Date) o;
return Objects.equals(formatter.format(), that.formatter.format());
return Objects.equals(formatter == null ? null : formatter.format(),
that.formatter == null ? null : that.formatter.format());
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + formatter.format().hashCode();
result = 31 * result + (formatter == null ? 0 : formatter.format().hashCode());
return result;
}
}
public static class Text extends FieldStats<BytesRef> {
public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
BytesRef minValue, BytesRef maxValue) {
@ -501,6 +595,13 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
public static class Ip extends FieldStats<InetAddress> {
public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable) {
super((byte) 4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
public Ip(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
boolean isSearchable, boolean isAggregatable,
InetAddress minValue, InetAddress maxValue) {
@ -550,27 +651,50 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
long sumTotalTermFreq = in.readLong();
boolean isSearchable = in.readBoolean();
boolean isAggregatable = in.readBoolean();
boolean hasMinMax = true;
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
hasMinMax = in.readBoolean();
}
switch (type) {
case 0:
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
if (hasMinMax) {
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readLong(), in.readLong());
} else {
return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 1:
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readDouble(), in.readDouble());
if (hasMinMax) {
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readDouble(), in.readDouble());
} else {
return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 2:
FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
if (hasMinMax) {
FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
} else {
return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 3:
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
if (hasMinMax) {
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
} else {
return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
case 4:
if (hasMinMax == false) {
return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
isSearchable, isAggregatable);
}
int l1 = in.readByte();
byte[] b1 = new byte[l1];
in.readBytes(b1, 0, l1);
@ -599,5 +723,4 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
private static final String MIN_VALUE_AS_STRING_FIELD = "min_value_as_string";
private static final String MAX_VALUE_FIELD = "max_value";
private static final String MAX_VALUE_AS_STRING_FIELD = "max_value_as_string";
}
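
FieldStats gains a hasMinMax flag, but the flag itself only exists on the wire from 5.2 onwards, so both serialization paths branch on the peer version: newer peers get an explicit flag, older peers always get values, and flag-less stats must be filtered out before they reach an old peer (which the FieldStatsResponse change below handles). A toy rendering of both sides (constants and names are illustrative):
---------------------------------------------------------------------------
import java.io.*;

final class MinMaxStats {
    static final int V_FLAG = 520; // hypothetical version that added the presence flag

    final boolean hasMinMax;
    final long min, max; // meaningless when hasMinMax is false

    MinMaxStats(boolean hasMinMax, long min, long max) {
        this.hasMinMax = hasMinMax;
        this.min = min;
        this.max = max;
    }

    void writeTo(DataOutput out, int peerVersion) throws IOException {
        if (peerVersion >= V_FLAG) {
            out.writeBoolean(hasMinMax);
            if (hasMinMax) {
                out.writeLong(min);
                out.writeLong(max);
            }
        } else {
            // Pre-flag peers always expect values; callers must not route
            // flag-less stats to them (see the response-level filtering below).
            out.writeLong(min);
            out.writeLong(max);
        }
    }

    static MinMaxStats readFrom(DataInput in, int peerVersion) throws IOException {
        // Before the flag existed, min/max were unconditionally present.
        boolean hasMinMax = peerVersion >= V_FLAG ? in.readBoolean() : true;
        if (hasMinMax) {
            return new MinMaxStats(true, in.readLong(), in.readLong());
        }
        return new MinMaxStats(false, 0, 0);
    }
}
---------------------------------------------------------------------------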

View File

@ -24,10 +24,8 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
@ -74,41 +72,39 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
this.indexConstraints = indexConstraints;
}
public void source(BytesReference content) throws IOException {
public void source(XContentParser parser) throws IOException {
List<IndexConstraint> indexConstraints = new ArrayList<>();
List<String> fields = new ArrayList<>();
try (XContentParser parser = XContentHelper.createParser(content)) {
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.START_OBJECT;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
break;
case START_OBJECT:
if ("index_constraints".equals(fieldName)) {
parseIndexConstraints(indexConstraints, parser);
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
case START_ARRAY:
if ("fields".equals(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue()) {
fields.add(parser.text());
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.START_OBJECT;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
break;
case START_OBJECT:
if ("index_constraints".equals(fieldName)) {
parseIndexConstraints(indexConstraints, parser);
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
case START_ARRAY:
if ("fields".equals(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue()) {
fields.add(parser.text());
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
default:
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
default:
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
}
this.fields = fields.toArray(new String[fields.size()]);
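
Both FieldStatsRequest (above) and MultiGetRequest (below) move from accepting raw bytes to accepting an XContentParser, pushing content-type detection and parser lifetime management out to the caller. The shape of that API change in miniature (all names are illustrative):
---------------------------------------------------------------------------
import java.io.*;

// Before: the request sniffed the content type and owned the parser.
// After: the caller builds (and closes) the parser; the request only consumes it.
final class ParserHandoffDemo {
    interface Parser extends Closeable {
        String nextToken() throws IOException;
        @Override
        default void close() {}
    }

    static void sourceOld(byte[] content) throws IOException {
        try (Parser parser = detect(content)) { // detection and lifetime hidden inside
            consume(parser);
        }
    }

    static void sourceNew(Parser parser) throws IOException {
        consume(parser); // the caller decides how the parser was built and when it closes
    }

    private static Parser detect(byte[] content) {
        return () -> null; // stand-in for content-type sniffing
    }

    private static void consume(Parser parser) throws IOException {
        parser.nextToken();
    }
}
---------------------------------------------------------------------------
This also concentrates content-type detection in one place instead of re-sniffing inside every request class.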

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.fieldstats;
import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.Nullable;
@ -91,10 +92,21 @@ public class FieldStatsResponse extends BroadcastResponse {
out.writeVInt(indicesMergedFieldStats.size());
for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
out.writeString(entry1.getKey());
out.writeVInt(entry1.getValue().size());
int size = entry1.getValue().size();
if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
// reduce the size so it excludes field stats without min/max information, which are skipped below
for (FieldStats stats : entry1.getValue().values()) {
if (stats.hasMinMax() == false) {
size--;
}
}
}
out.writeVInt(size);
for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
}
}
}
out.writeVInt(conflicts.size());
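
Because a variable-length size prefix precedes the per-field entries, the writer must know the post-filter count before emitting anything; hence the two passes in the diff: first shrink the count for pre-5.2 peers, then skip the same entries while writing. In sketch form (names are illustrative, and a fixed-width int stands in for the VInt):
---------------------------------------------------------------------------
import java.io.*;
import java.util.*;

final class FilteredMapWriter {
    static final int V_FLAG = 520; // same hypothetical cut-over as in the sketch above

    interface StatsLike {
        boolean hasMinMax();
        void writeTo(DataOutput out, int peerVersion) throws IOException;
    }

    // Write only the entries the peer can decode, with a matching size prefix.
    static void writeStats(DataOutput out, Map<String, StatsLike> stats, int peerVersion) throws IOException {
        int size = stats.size();
        if (peerVersion < V_FLAG) {
            for (StatsLike s : stats.values()) {
                if (s.hasMinMax() == false) {
                    size--; // old peers cannot represent flag-less stats
                }
            }
        }
        out.writeInt(size); // must go out before any entry
        for (Map.Entry<String, StatsLike> e : stats.entrySet()) {
            if (e.getValue().hasMinMax() || peerVersion >= V_FLAG) {
                out.writeUTF(e.getKey());
                e.getValue().writeTo(out, peerVersion);
            }
        }
    }
}
---------------------------------------------------------------------------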

View File

@ -27,12 +27,14 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.get.GetField;
import org.elasticsearch.index.get.GetResult;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
/**
* The response of a get action.
@ -42,7 +44,7 @@ import java.util.Map;
*/
public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContent {
private GetResult getResult;
GetResult getResult;
GetResponse() {
}
@ -134,18 +136,10 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.getSource();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public Map<String, GetField> getFields() {
return getResult.getFields();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public GetField getField(String name) {
return getResult.field(name);
}
@ -164,6 +158,11 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.toXContent(builder, params);
}
public static GetResponse fromXContent(XContentParser parser) throws IOException {
GetResult getResult = GetResult.fromXContent(parser);
return new GetResponse(getResult);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -176,6 +175,23 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
getResult.writeTo(out);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
GetResponse getResponse = (GetResponse) o;
return Objects.equals(getResult, getResponse.getResult);
}
@Override
public int hashCode() {
return Objects.hash(getResult);
}
@Override
public String toString() {
return Strings.toString(this, true);

View File

@ -30,13 +30,10 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
@ -317,32 +314,19 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
return this;
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, data, true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data, boolean allowExplicitIndex) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, null, data, allowExplicitIndex);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws IOException {
try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex);
} else if ("ids".equals(currentFieldName)) {
parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields,
@Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, XContentParser parser,
boolean allowExplicitIndex) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex);
} else if ("ids".equals(currentFieldName)) {
parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting);
}
}
}

View File

@ -140,9 +140,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe
builder.endObject();
} else {
GetResponse getResponse = response.getResponse();
builder.startObject();
getResponse.toXContent(builder, params);
builder.endObject();
}
}
builder.endArray();
@ -154,9 +152,6 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe
static final String _INDEX = "_index";
static final String _TYPE = "_type";
static final String _ID = "_id";
static final String ERROR = "error";
static final String ROOT_CAUSE = "root_cause";
}
@Override

View File

@ -140,9 +140,15 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
validationException = addValidationError("source is missing", validationException);
}
final long resolvedVersion = resolveVersionDefaults();
if (opType() == OpType.CREATE) {
if (versionType != VersionType.INTERNAL || version != Versions.MATCH_DELETED) {
validationException = addValidationError("create operations do not support versioning. use index instead", validationException);
if (versionType != VersionType.INTERNAL) {
validationException = addValidationError("create operations only support internal versioning. use index instead", validationException);
return validationException;
}
if (resolvedVersion != Versions.MATCH_DELETED) {
validationException = addValidationError("create operations do not support explicit versions. use index instead", validationException);
return validationException;
}
}
@ -151,8 +157,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
addValidationError("an id is required for a " + opType() + " operation", validationException);
}
if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
if (!versionType.validateVersionForWrites(resolvedVersion)) {
validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" + versionType.name() + "]", validationException);
}
if (versionType == VersionType.FORCE) {
@ -164,7 +170,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}
if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
if (id == null && (versionType == VersionType.INTERNAL && resolvedVersion == Versions.MATCH_ANY) == false) {
validationException = addValidationError("an id must be provided if version type or value are set", validationException);
}
@ -323,46 +329,14 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
return this;
}
public IndexRequest source(String field1, Object value1) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(field1, value1).endObject();
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate", e);
}
}
public IndexRequest source(String field1, Object value1, String field2, Object value2) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(field1, value1).field(field2, value2).endObject();
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate", e);
}
}
public IndexRequest source(String field1, Object value1, String field2, Object value2, String field3, Object value3) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(field1, value1).field(field2, value2).field(field3, value3).endObject();
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate", e);
}
}
public IndexRequest source(String field1, Object value1, String field2, Object value2, String field3, Object value3, String field4, Object value4) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
builder.startObject().field(field1, value1).field(field2, value2).field(field3, value3).field(field4, value4).endObject();
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate", e);
}
}
/**
* Sets the content source to index.
* <p>
* <b>Note: the number of objects passed to this method must be an even
* number. Also the first argument in each pair (the field name) must have a
* valid String representation.</b>
* </p>
*/
public IndexRequest source(Object... source) {
if (source.length % 2 != 0) {
throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]");
@ -419,10 +393,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
}
this.opType = opType;
if (opType == OpType.CREATE) {
version(Versions.MATCH_DELETED);
versionType(VersionType.INTERNAL);
}
return this;
}
@ -465,9 +435,24 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
return this;
}
/**
* Returns the stored version. If the currently stored version is {@link Versions#MATCH_ANY} and
* the opType is {@link OpType#CREATE}, returns {@link Versions#MATCH_DELETED}.
*/
@Override
public long version() {
return this.version;
return resolveVersionDefaults();
}
/**
* Resolves the version based on operation type {@link #opType()}.
*/
private long resolveVersionDefaults() {
if (opType == OpType.CREATE && version == Versions.MATCH_ANY) {
return Versions.MATCH_DELETED;
} else {
return version;
}
}
@Override
@ -539,12 +524,20 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeOptionalString(routing);
out.writeOptionalString(parent);
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeOptionalString(null);
// Serialize a fake timestamp. 5.x expects this value to be set by the #process method so we can't use null.
// On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value)
// over the transport layer is OK as it will be ignored.
out.writeOptionalString("0");
out.writeOptionalWriteable(null);
}
out.writeBytesReference(source);
out.writeByte(opType.getId());
out.writeLong(version);
// ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
if (out.getVersion().before(Version.V_5_1_2_UNRELEASED)) {
out.writeLong(resolveVersionDefaults());
} else {
out.writeLong(version);
}
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
out.writeBoolean(isRetry);
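
Taken together, the changes above mean a create with an unset version behaves as "the document must not exist", resolved lazily rather than by mutating the request, while writeTo resolves it eagerly only for pre-5.1.2 peers that still expect the old behavior. A minimal standalone sketch of that resolution logic; the sentinel values are assumptions standing in for Versions.MATCH_ANY and Versions.MATCH_DELETED (org.elasticsearch.common.lucene.uid.Versions):

---------------------------------------------------------------------------
// Hypothetical stand-ins for the real sentinels in Versions.
final class VersionDefaultsSketch {
    static final long MATCH_ANY = -3L;     // assumed: "any version is fine"
    static final long MATCH_DELETED = -4L; // assumed: "document must be deleted/absent"

    enum OpType { INDEX, CREATE }

    // Mirrors IndexRequest#resolveVersionDefaults(): a create with an unset
    // version is treated as "the document must not exist yet".
    static long resolveVersionDefaults(OpType opType, long version) {
        if (opType == OpType.CREATE && version == MATCH_ANY) {
            return MATCH_DELETED;
        }
        return version;
    }

    public static void main(String[] args) {
        System.out.println(resolveVersionDefaults(OpType.CREATE, MATCH_ANY)); // -4
        System.out.println(resolveVersionDefaults(OpType.INDEX, MATCH_ANY));  // -3
        System.out.println(resolveVersionDefaults(OpType.CREATE, 7L));        // 7 (validation rejects this)
    }
}
---------------------------------------------------------------------------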

View File

@ -148,41 +148,13 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this;
}
/**
* Constructs a simple document with a field and a value.
*/
public IndexRequestBuilder setSource(String field1, Object value1) {
request.source(field1, value1);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2) {
request.source(field1, value1, field2, value2);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3) {
request.source(field1, value1, field2, value2, field3, value3);
return this;
}
/**
* Constructs a simple document with a field and value pairs.
*/
public IndexRequestBuilder setSource(String field1, Object value1, String field2, Object value2, String field3, Object value3, String field4, Object value4) {
request.source(field1, value1, field2, value2, field3, value3, field4, value4);
return this;
}
/**
* Constructs a simple document with a field name and value pairs.
* <b>Note: the number of objects passed to this method must be an even number.</b>
* <p>
* <b>Note: the number of objects passed to this method must be an even
* number. Also the first argument in each pair (the field name) must have a
* valid String representation.</b>
* </p>
*/
public IndexRequestBuilder setSource(Object... source) {
request.source(source);
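
With the fixed-arity overloads removed, callers go through the varargs method and must pass an even number of arguments, alternating field names and values. A standalone sketch of that contract (hypothetical helper, not part of the builder API):

---------------------------------------------------------------------------
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the even-arity field/value contract enforced by source(Object...).
final class PairsSketch {
    static Map<String, Object> toMap(Object... source) {
        if (source.length % 2 != 0) {
            throw new IllegalArgumentException(
                "The number of objects passed must be even but was [" + source.length + "]");
        }
        Map<String, Object> map = new LinkedHashMap<>();
        for (int i = 0; i < source.length; i += 2) {
            map.put(source[i].toString(), source[i + 1]); // field names need a valid String form
        }
        return map;
    }

    public static void main(String[] args) {
        System.out.println(toMap("user", "kimchy", "age", 42)); // {user=kimchy, age=42}
    }
}
---------------------------------------------------------------------------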

View File

@ -19,11 +19,15 @@
package org.elasticsearch.action.index;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.ingest.IngestActionForwarder;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.replication.ReplicationOperation;
@ -36,6 +40,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
@ -44,8 +49,8 @@ import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -67,14 +72,16 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
private final TransportCreateIndexAction createIndexAction;
private final ClusterService clusterService;
private final IngestService ingestService;
private final MappingUpdatedAction mappingUpdatedAction;
private final IngestActionForwarder ingestForwarder;
@Inject
public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
IndicesService indicesService, IngestService ingestService, ThreadPool threadPool,
ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex) {
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
this.mappingUpdatedAction = mappingUpdatedAction;
@ -82,13 +89,24 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
this.autoCreateIndex = autoCreateIndex;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
this.clusterService = clusterService;
this.ingestService = ingestService;
this.ingestForwarder = new IngestActionForwarder(transportService);
clusterService.addStateApplier(this.ingestForwarder);
}
@Override
protected void doExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
if (Strings.hasText(request.getPipeline())) {
if (clusterService.localNode().isIngestNode()) {
processIngestIndexRequest(task, request, listener);
} else {
ingestForwarder.forwardIngestRequest(IndexAction.INSTANCE, request, listener);
}
return;
}
// if we don't have a master, we don't have metadata; that's fine, let it find a master using the create index API
ClusterState state = clusterService.state();
if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
if (shouldAutoCreate(request, state)) {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
createIndexRequest.index(request.index());
createIndexRequest.cause("auto(index api)");
@ -119,6 +137,10 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
}
}
protected boolean shouldAutoCreate(IndexRequest request, ClusterState state) {
return autoCreateIndex.shouldAutoCreate(request.index(), state);
}
@Override
protected void resolveRequest(MetaData metaData, IndexMetaData indexMetaData, IndexRequest request) {
super.resolveRequest(metaData, indexMetaData, request);
@ -130,7 +152,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
request.setShardId(shardId);
}
private void innerExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
protected void innerExecute(Task task, final IndexRequest request, final ActionListener<IndexResponse> listener) {
super.doExecute(task, request, listener);
}
@ -143,19 +165,22 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
protected WritePrimaryResult shardOperationOnPrimary(IndexRequest request, IndexShard primary) throws Exception {
final Engine.IndexResult indexResult = executeIndexRequestOnPrimary(request, primary, mappingUpdatedAction);
final IndexResponse response;
final IndexRequest replicaRequest;
if (indexResult.hasFailure() == false) {
// update the version on request so it will happen on the replicas
final long version = indexResult.getVersion();
request.version(version);
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
request.seqNo(indexResult.getSeqNo());
request.setSeqNo(indexResult.getSeqNo());
assert request.versionType().validateVersionForWrites(request.version());
replicaRequest = request;
response = new IndexResponse(primary.shardId(), request.type(), request.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
} else {
response = null;
replicaRequest = null;
}
return new WritePrimaryResult(request, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary);
return new WritePrimaryResult(replicaRequest, response, indexResult.getTranslogLocation(), indexResult.getFailure(), primary);
}
@Override
@ -175,9 +200,9 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
final Engine.Index operation;
try {
operation = replica.prepareIndexOnReplica(sourceToParse, request.seqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
operation = replica.prepareIndexOnReplica(sourceToParse, request.getSeqNo(), request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry());
} catch (MapperParsingException e) {
return new Engine.IndexResult(e, request.version(), request.seqNo());
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
@ -199,7 +224,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version(), request.seqNo());
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
}
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = primary.shardId();
@ -210,12 +235,12 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
} catch (IllegalArgumentException e) {
// throws IAE on conflicts merging dynamic mappings
return new Engine.IndexResult(e, request.version(), request.seqNo());
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
}
try {
operation = prepareIndexOperationOnPrimary(request, primary);
} catch (MapperParsingException | IllegalArgumentException e) {
return new Engine.IndexResult(e, request.version(), request.seqNo());
return new Engine.IndexResult(e, request.version(), request.getSeqNo());
}
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {
@ -227,5 +252,17 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
return primary.index(operation);
}
}
private void processIngestIndexRequest(Task task, IndexRequest indexRequest, ActionListener listener) {
ingestService.getPipelineExecutionService().executeIndexRequest(indexRequest, t -> {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses the same IndexRequest and action name on the node that receives the request and on the
// node that processes the primary action. This could lead to the pipeline being executed twice for the same
// index request, hence we set the pipeline to null once its execution has completed.
indexRequest.setPipeline(null);
doExecute(task, indexRequest, listener);
});
}
}
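
The doExecute change above folds the deleted IngestActionFilter's routing decision into the transport action itself: run the pipeline locally when this node is an ingest node, otherwise forward to one that is. A minimal sketch of that branch, with the node role and the three outcomes stubbed as assumptions:

---------------------------------------------------------------------------
// Sketch of the local-vs-forward routing now done in TransportIndexAction#doExecute.
final class IngestRoutingSketch {
    interface Node { boolean isIngestNode(); }

    static void route(String pipeline, Node localNode, Runnable executeLocally,
                      Runnable forwardToIngestNode, Runnable executeWithoutIngest) {
        if (pipeline != null && !pipeline.isEmpty()) { // Strings.hasText(...)
            if (localNode.isIngestNode()) {
                executeLocally.run();                  // run the pipeline here
            } else {
                forwardToIngestNode.run();             // hand off via IngestActionForwarder
            }
            return;                                    // never fall through to a double execution
        }
        executeWithoutIngest.run();                    // plain index path
    }

    public static void main(String[] args) {
        route("my-pipeline", () -> false,
            () -> System.out.println("local"),
            () -> System.out.println("forward"),
            () -> System.out.println("plain index"));  // prints "forward"
    }
}
---------------------------------------------------------------------------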

View File

@ -1,243 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineExecutionService;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.tasks.Task;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
public final class IngestActionFilter extends AbstractComponent implements ActionFilter {
private final PipelineExecutionService executionService;
@Inject
public IngestActionFilter(Settings settings, NodeService nodeService) {
super(settings);
this.executionService = nodeService.getIngestService().getPipelineExecutionService();
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
switch (action) {
case IndexAction.NAME:
IndexRequest indexRequest = (IndexRequest) request;
if (Strings.hasText(indexRequest.getPipeline())) {
processIndexRequest(task, action, listener, chain, (IndexRequest) request);
} else {
chain.proceed(task, action, request, listener);
}
break;
case BulkAction.NAME:
BulkRequest bulkRequest = (BulkRequest) request;
if (bulkRequest.hasIndexRequestsWithPipelines()) {
@SuppressWarnings("unchecked")
ActionListener<BulkResponse> actionListener = (ActionListener<BulkResponse>) listener;
processBulkIndexRequest(task, bulkRequest, action, chain, actionListener);
} else {
chain.proceed(task, action, request, listener);
}
break;
default:
chain.proceed(task, action, request, listener);
break;
}
}
@Override
public <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener, ActionFilterChain<?, Response> chain) {
chain.proceed(action, response, listener);
}
void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
executionService.executeIndexRequest(indexRequest, t -> {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}]", indexRequest.getPipeline()), t);
listener.onFailure(t);
}, success -> {
// TransportIndexAction uses the same IndexRequest and action name on the node that receives the request and on the
// node that processes the primary action. This could lead to the pipeline being executed twice for the same
// index request, hence we set the pipeline to null once its execution has completed.
indexRequest.setPipeline(null);
chain.proceed(task, action, indexRequest, listener);
});
}
void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
long ingestStartTimeInNanos = System.nanoTime();
BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> {
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception);
bulkRequestModifier.markCurrentItemAsFailed(exception);
}, (exception) -> {
if (exception != null) {
logger.error("failed to execute pipeline for a bulk request", exception);
listener.onFailure(exception);
} else {
long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos);
BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest();
ActionListener<BulkResponse> actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener);
if (bulkRequest.requests().isEmpty()) {
// at this stage, the transport bulk action can't deal with a bulk request with no requests,
// so we stop and send an empty response back to the client.
// (this will happen if pre-processing of all items in the bulk failed)
actionListener.onResponse(new BulkResponse(new BulkItemResponse[0], 0));
} else {
chain.proceed(task, action, bulkRequest, actionListener);
}
}
});
}
@Override
public int order() {
return Integer.MAX_VALUE;
}
static final class BulkRequestModifier implements Iterator<DocWriteRequest> {
final BulkRequest bulkRequest;
final Set<Integer> failedSlots;
final List<BulkItemResponse> itemResponses;
int currentSlot = -1;
int[] originalSlots;
BulkRequestModifier(BulkRequest bulkRequest) {
this.bulkRequest = bulkRequest;
this.failedSlots = new HashSet<>();
this.itemResponses = new ArrayList<>(bulkRequest.requests().size());
}
@Override
public DocWriteRequest next() {
return bulkRequest.requests().get(++currentSlot);
}
@Override
public boolean hasNext() {
return (currentSlot + 1) < bulkRequest.requests().size();
}
BulkRequest getBulkRequest() {
if (itemResponses.isEmpty()) {
return bulkRequest;
} else {
BulkRequest modifiedBulkRequest = new BulkRequest();
modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy());
modifiedBulkRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
modifiedBulkRequest.timeout(bulkRequest.timeout());
int slot = 0;
originalSlots = new int[bulkRequest.requests().size() - failedSlots.size()];
for (int i = 0; i < bulkRequest.requests().size(); i++) {
DocWriteRequest request = bulkRequest.requests().get(i);
if (failedSlots.contains(i) == false) {
modifiedBulkRequest.add(request);
originalSlots[slot++] = i;
}
}
return modifiedBulkRequest;
}
}
ActionListener<BulkResponse> wrapActionListenerIfNeeded(long ingestTookInMillis, ActionListener<BulkResponse> actionListener) {
if (itemResponses.isEmpty()) {
return new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
actionListener.onResponse(new BulkResponse(response.getItems(), response.getTookInMillis(), ingestTookInMillis));
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
};
} else {
return new IngestBulkResponseListener(ingestTookInMillis, originalSlots, itemResponses, actionListener);
}
}
void markCurrentItemAsFailed(Exception e) {
IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(currentSlot);
// We hit an error while preprocessing a request, so we:
// 1) Remember the request item slot from the bulk, so that when we're done processing all requests we know what failed
// 2) Add a bulk item failure for this request
// 3) Continue with the next request in the bulk.
failedSlots.add(currentSlot);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e);
itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure));
}
}
static final class IngestBulkResponseListener implements ActionListener<BulkResponse> {
private final long ingestTookInMillis;
private final int[] originalSlots;
private final List<BulkItemResponse> itemResponses;
private final ActionListener<BulkResponse> actionListener;
IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List<BulkItemResponse> itemResponses, ActionListener<BulkResponse> actionListener) {
this.ingestTookInMillis = ingestTookInMillis;
this.itemResponses = itemResponses;
this.actionListener = actionListener;
this.originalSlots = originalSlots;
}
@Override
public void onResponse(BulkResponse response) {
for (int i = 0; i < response.getItems().length; i++) {
itemResponses.add(originalSlots[i], response.getItems()[i]);
}
actionListener.onResponse(new BulkResponse(itemResponses.toArray(new BulkItemResponse[itemResponses.size()]), response.getTookInMillis(), ingestTookInMillis));
}
@Override
public void onFailure(Exception e) {
actionListener.onFailure(e);
}
}
}
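
The trickiest part of the deleted filter is BulkRequestModifier's slot bookkeeping: items that fail pipeline preprocessing are dropped from the forwarded bulk, and originalSlots records where each surviving item sat in the original request so responses can be merged back in order. A standalone sketch of that remapping under the same assumptions:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of the slot remapping used to merge ingest failures into a bulk response.
final class SlotRemapSketch {
    public static void main(String[] args) {
        List<String> requests = Arrays.asList("a", "b", "c", "d");
        Set<Integer> failedSlots = new HashSet<>(Arrays.asList(1)); // "b" failed preprocessing

        List<String> forwarded = new ArrayList<>();
        int[] originalSlots = new int[requests.size() - failedSlots.size()];
        int slot = 0;
        for (int i = 0; i < requests.size(); i++) {
            if (failedSlots.contains(i) == false) {
                forwarded.add(requests.get(i));
                originalSlots[slot++] = i; // surviving slot -> original slot
            }
        }

        // Responses for the forwarded items map back to their original positions.
        for (int i = 0; i < forwarded.size(); i++) {
            System.out.println("response[" + i + "] belongs at original slot " + originalSlots[i]);
        }
    }
}
---------------------------------------------------------------------------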

View File

@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger;
/**
* A utility for forwarding ingest requests to ingest nodes in a round-robin fashion.
*
* TODO: move this into IngestService and make index/bulk actions call that
*/
public final class IngestActionForwarder implements ClusterStateApplier {
private final TransportService transportService;
private final AtomicInteger ingestNodeGenerator = new AtomicInteger(Randomness.get().nextInt());
private DiscoveryNode[] ingestNodes;
public IngestActionForwarder(TransportService transportService) {
this.transportService = transportService;
ingestNodes = new DiscoveryNode[0];
}
public void forwardIngestRequest(Action<?, ?, ?> action, ActionRequest request, ActionListener<?> listener) {
transportService.sendRequest(randomIngestNode(), action.name(), request,
new ActionListenerResponseHandler(listener, action::newResponse));
}
private DiscoveryNode randomIngestNode() {
final DiscoveryNode[] nodes = ingestNodes;
if (nodes.length == 0) {
throw new IllegalStateException("There are no ingest nodes in this cluster, unable to forward request to an ingest node.");
}
return nodes[Math.floorMod(ingestNodeGenerator.incrementAndGet(), nodes.length)];
}
@Override
public void applyClusterState(ClusterChangedEvent event) {
ingestNodes = event.state().getNodes().getIngestNodes().values().toArray(DiscoveryNode.class);
}
}
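
Node selection here is a lock-free round robin: a shared counter seeded randomly, with Math.floorMod keeping the index non-negative even after the int counter wraps around (the deleted IngestProxyActionFilter below instead reset its counter to zero on overflow). A standalone sketch of the selector:

---------------------------------------------------------------------------
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the round-robin selection in IngestActionForwarder#randomIngestNode().
final class RoundRobinSketch {
    private final AtomicInteger generator =
        new AtomicInteger(ThreadLocalRandom.current().nextInt()); // random seed, like Randomness.get()

    String pick(String[] nodes) {
        if (nodes.length == 0) {
            throw new IllegalStateException("no ingest nodes");
        }
        // floorMod is never negative, so counter wrap-around is harmless; a plain %
        // would produce negative indices once incrementAndGet() overflows.
        return nodes[Math.floorMod(generator.incrementAndGet(), nodes.length)];
    }

    public static void main(String[] args) {
        RoundRobinSketch rr = new RoundRobinSketch();
        String[] nodes = {"node-1", "node-2", "node-3"};
        for (int i = 0; i < 5; i++) {
            System.out.println(rr.pick(nodes)); // cycles through the nodes
        }
    }
}
---------------------------------------------------------------------------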

View File

@ -1,119 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.util.concurrent.atomic.AtomicInteger;
public final class IngestProxyActionFilter implements ActionFilter {
private final ClusterService clusterService;
private final TransportService transportService;
private final AtomicInteger randomNodeGenerator = new AtomicInteger(Randomness.get().nextInt());
@Inject
public IngestProxyActionFilter(ClusterService clusterService, TransportService transportService) {
this.clusterService = clusterService;
this.transportService = transportService;
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request, ActionListener<Response> listener, ActionFilterChain<Request, Response> chain) {
Action ingestAction;
switch (action) {
case IndexAction.NAME:
ingestAction = IndexAction.INSTANCE;
IndexRequest indexRequest = (IndexRequest) request;
if (Strings.hasText(indexRequest.getPipeline())) {
forwardIngestRequest(ingestAction, request, listener);
} else {
chain.proceed(task, action, request, listener);
}
break;
case BulkAction.NAME:
ingestAction = BulkAction.INSTANCE;
BulkRequest bulkRequest = (BulkRequest) request;
if (bulkRequest.hasIndexRequestsWithPipelines()) {
forwardIngestRequest(ingestAction, request, listener);
} else {
chain.proceed(task, action, request, listener);
}
break;
default:
chain.proceed(task, action, request, listener);
break;
}
}
@SuppressWarnings("unchecked")
private void forwardIngestRequest(Action<?, ?, ?> action, ActionRequest request, ActionListener<?> listener) {
transportService.sendRequest(randomIngestNode(), action.name(), request, new ActionListenerResponseHandler(listener, action::newResponse));
}
@Override
public <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener, ActionFilterChain<?, Response> chain) {
chain.proceed(action, response, listener);
}
@Override
public int order() {
return Integer.MAX_VALUE;
}
private DiscoveryNode randomIngestNode() {
assert clusterService.localNode().isIngestNode() == false;
DiscoveryNodes nodes = clusterService.state().getNodes();
DiscoveryNode[] ingestNodes = nodes.getIngestNodes().values().toArray(DiscoveryNode.class);
if (ingestNodes.length == 0) {
throw new IllegalStateException("There are no ingest nodes in this cluster, unable to forward request to an ingest node.");
}
int index = getNodeNumber();
return ingestNodes[(index) % ingestNodes.length];
}
private int getNodeNumber() {
int index = randomNodeGenerator.incrementAndGet();
if (index < 0) {
index = 0;
randomNodeGenerator.set(0);
}
return index;
}
}

View File

@ -50,6 +50,7 @@ import java.util.function.Function;
abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
private static final float DEFAULT_INDEX_BOOST = 1.0f;
protected final Logger logger;
protected final SearchTransportService searchTransportService;
@ -66,6 +67,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final AtomicInteger totalOps = new AtomicInteger();
protected final AtomicArray<FirstResult> firstResults;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
private final long clusterStateVersion;
private volatile AtomicArray<ShardSearchFailure> shardFailures;
private final Object shardFailuresMutex = new Object();
@ -73,9 +75,9 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) {
super(startTime);
this.logger = logger;
this.searchTransportService = searchTransportService;
@ -91,6 +93,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<>(shardsIts.size());
this.aliasFilter = aliasFilter;
this.concreteIndexBoosts = concreteIndexBoosts;
}
public void start() {
@ -125,8 +128,10 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
} else {
AliasFilter filter = this.aliasFilter.get(shard.index().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(),
filter, startTime());
filter, indexBoost, startTime());
sendExecuteFirstPhase(node, transportRequest , new ActionListener<FirstResult>() {
@Override
public void onResponse(FirstResult result) {

View File

@ -47,10 +47,11 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final SearchPhaseController searchPhaseController;
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts,
long startTime, long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
queryFetchResults = new AtomicArray<>(firstResults.length());

View File

@ -55,11 +55,11 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe
SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
queryResults = new AtomicArray<>(firstResults.length());

View File

@ -40,12 +40,12 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc
SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;

View File

@ -50,13 +50,13 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
private final SearchPhaseController searchPhaseController;
SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode, Map<String,
AliasFilter> aliasFilter,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener,
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener,
shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
fetchResults = new AtomicArray<>(firstResults.length());

View File

@ -42,6 +42,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@ -100,6 +101,29 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return aliasFilterMap;
}
private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, ClusterState clusterState) {
if (searchRequest.source() == null) {
return Collections.emptyMap();
}
SearchSourceBuilder source = searchRequest.source();
if (source.indexBoosts() == null) {
return Collections.emptyMap();
}
Map<String, Float> concreteIndexBoosts = new HashMap<>();
for (SearchSourceBuilder.IndexBoost ib : source.indexBoosts()) {
Index[] concreteIndices =
indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), ib.getIndex());
for (Index concreteIndex : concreteIndices) {
concreteIndexBoosts.putIfAbsent(concreteIndex.getUUID(), ib.getBoost());
}
}
return Collections.unmodifiableMap(concreteIndexBoosts);
}
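
resolveIndexBoosts expands each boost expression to concrete indices and keys the boost by index UUID; putIfAbsent means the first expression matching a concrete index wins, so more specific entries should precede wildcards in the request. A standalone sketch of that first-wins expansion, with a toy map standing in for IndexNameExpressionResolver:

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of first-expression-wins index boost resolution, keyed by a stable index id.
final class IndexBoostSketch {
    public static void main(String[] args) {
        // Toy "cluster": expression -> concrete index UUIDs (assumption, standing in
        // for IndexNameExpressionResolver#concreteIndices).
        Map<String, List<String>> resolver = new HashMap<>();
        resolver.put("logs-2016", Arrays.asList("uuid-a"));
        resolver.put("logs-*", Arrays.asList("uuid-a", "uuid-b"));

        // Boosts in request order: specific entry first, wildcard second.
        Map<String, Float> requested = new LinkedHashMap<>();
        requested.put("logs-2016", 3.0f);
        requested.put("logs-*", 1.5f);

        Map<String, Float> concrete = new HashMap<>();
        for (Map.Entry<String, Float> e : requested.entrySet()) {
            for (String uuid : resolver.get(e.getKey())) {
                concrete.putIfAbsent(uuid, e.getValue()); // first match wins
            }
        }
        System.out.println(concrete); // e.g. {uuid-a=3.0, uuid-b=1.5}
    }
}
---------------------------------------------------------------------------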
@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// pure paranoia: if time goes backwards we are at least positive
@ -212,6 +236,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
failIfOverShardCountLimit(clusterService, shardIterators.size());
Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);
// optimize search type for cases where there is only one shard group to search on
if (shardIterators.size() == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
@ -230,9 +256,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}
Function<String, DiscoveryNode> nodesLookup = mergeNodesLookup(clusterState.nodes(), remoteNodes);
searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, nodesLookup, clusterState.version(),
Collections.unmodifiableMap(aliasFilter), listener).start();
searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, nodesLookup, clusterState.version(),
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}
private static GroupShardsIterator mergeShardsIterators(GroupShardsIterator localShardsIterator,
@ -271,28 +296,29 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, GroupShardsIterator shardIterators,
long startTime, Function<String, DiscoveryNode> nodesLookup,
long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
ActionListener<SearchResponse> listener) {
Executor executor = threadPool.executor(ThreadPool.Names.SEARCH);
AbstractSearchAsyncAction searchAsyncAction;
switch(searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case DFS_QUERY_AND_FETCH:
searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case QUERY_AND_FETCH:
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
default:
@ -311,5 +337,4 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
+ "] to a greater value if you really want to query that many shards at the same time.");
}
}
}

View File

@ -42,14 +42,6 @@ public interface ActionFilter {
*/
<Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request,
ActionListener<Response> listener, ActionFilterChain<Request, Response> chain);
/**
* Enables filtering the execution of an action on the response side, either by sending a response through the
* {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain}
*/
<Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener,
ActionFilterChain<?, Response> chain);
/**
* A simple base class for injectable action filters that spares the implementation from handling the
* filter chain. This base class should serve any action filter implementations that doesn't require
@ -74,19 +66,5 @@ public interface ActionFilter {
* if it should be aborted since the filter already handled the request and called the given listener.
*/
protected abstract boolean apply(String action, ActionRequest request, ActionListener<?> listener);
@Override
public final <Response extends ActionResponse> void apply(String action, Response response, ActionListener<Response> listener,
ActionFilterChain<?, Response> chain) {
if (apply(action, response, listener)) {
chain.proceed(action, response, listener);
}
}
/**
* Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false}
* if it should be aborted since the filter already handled the response by calling the given listener.
*/
protected abstract boolean apply(String action, ActionResponse response, ActionListener<?> listener);
}
}

View File

@ -34,10 +34,4 @@ public interface ActionFilterChain<Request extends ActionRequest, Response exten
* the given {@link ActionListener listener}
*/
void proceed(Task task, final String action, final Request request, final ActionListener<Response> listener);
/**
* Continue processing the response. Should only be called if a response has not been sent through
* the given {@link ActionListener listener}
*/
void proceed(final String action, final Response response, final ActionListener<Response> listener);
}

View File

@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -31,6 +30,7 @@ import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
* This class provides primitives for waiting for a configured number of shards
@ -70,16 +70,10 @@ public class ActiveShardsObserver extends AbstractComponent {
}
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
if (activeShardCount.enoughShardsActive(observer.observedState().getClusterState(), indexName)) {
if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) {
onResult.accept(true);
} else {
final ClusterStateObserver.ChangePredicate shardsAllocatedPredicate =
new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(final ClusterServiceState newState) {
return activeShardCount.enoughShardsActive(newState.getClusterState(), indexName);
}
};
final Predicate<ClusterState> shardsAllocatedPredicate = newState -> activeShardCount.enoughShardsActive(newState, indexName);
final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {
@Override

View File

@ -167,8 +167,7 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
if (i < this.action.filters.length) {
this.action.filters[i].apply(task, actionName, request, listener, this);
} else if (i == this.action.filters.length) {
this.action.doExecute(task, request, new FilteredActionListener<>(actionName, listener,
new ResponseFilterChain<>(this.action.filters, logger)));
this.action.doExecute(task, request, listener);
} else {
listener.onFailure(new IllegalStateException("proceed was called too many times"));
}
@ -178,69 +177,6 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
}
}
@Override
public void proceed(String action, Response response, ActionListener<Response> listener) {
assert false : "request filter chain should never be called on the response side";
}
}
private static class ResponseFilterChain<Request extends ActionRequest, Response extends ActionResponse>
implements ActionFilterChain<Request, Response> {
private final ActionFilter[] filters;
private final AtomicInteger index;
private final Logger logger;
private ResponseFilterChain(ActionFilter[] filters, Logger logger) {
this.filters = filters;
this.index = new AtomicInteger(filters.length);
this.logger = logger;
}
@Override
public void proceed(Task task, String action, Request request, ActionListener<Response> listener) {
assert false : "response filter chain should never be called on the request side";
}
@Override
public void proceed(String action, Response response, ActionListener<Response> listener) {
int i = index.decrementAndGet();
try {
if (i >= 0) {
filters[i].apply(action, response, listener, this);
} else if (i == -1) {
listener.onResponse(response);
} else {
listener.onFailure(new IllegalStateException("proceed was called too many times"));
}
} catch (Exception e) {
logger.trace("Error during transport action execution.", e);
listener.onFailure(e);
}
}
}
private static class FilteredActionListener<Response extends ActionResponse> implements ActionListener<Response> {
private final String actionName;
private final ActionListener<Response> listener;
private final ResponseFilterChain<?, Response> chain;
private FilteredActionListener(String actionName, ActionListener<Response> listener, ResponseFilterChain<?, Response> chain) {
this.actionName = actionName;
this.listener = listener;
this.chain = chain;
}
@Override
public void onResponse(Response response) {
chain.proceed(actionName, response, listener);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}
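
With the response-side chain deleted, what remains is the request-side pattern: an AtomicInteger walks the filter array, each filter decides whether to proceed, and the action body runs exactly once at the end of the chain. A minimal sketch of that shape, with hypothetical Filter and Chain interfaces in place of ActionFilter/ActionFilterChain:

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the index-walking request filter chain kept in TransportAction.
final class FilterChainSketch {
    interface Filter { void apply(String request, Chain chain); }
    interface Chain { void proceed(String request); }

    static void execute(Filter[] filters, String request, Runnable action) {
        AtomicInteger index = new AtomicInteger();
        Chain chain = new Chain() {
            @Override public void proceed(String req) {
                int i = index.getAndIncrement();
                if (i < filters.length) {
                    filters[i].apply(req, this); // next filter decides whether to continue
                } else if (i == filters.length) {
                    action.run();                // end of the chain: run the action once
                } else {
                    throw new IllegalStateException("proceed was called too many times");
                }
            }
        };
        chain.proceed(request);
    }

    public static void main(String[] args) {
        Filter logging = (req, chain) -> { System.out.println("filter saw " + req); chain.proceed(req); };
        execute(new Filter[] { logging }, "index-doc", () -> System.out.println("action ran"));
    }
}
---------------------------------------------------------------------------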
/**

View File

@ -505,11 +505,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indicesLevelRequest = readRequestFrom(in);
int size = in.readVInt();
shards = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
shards.add(new ShardRouting(in));
}
shards = in.readList(ShardRouting::new);
nodeId = in.readString();
}
@ -517,11 +513,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
indicesLevelRequest.writeTo(out);
int size = shards.size();
out.writeVInt(size);
for (int i = 0; i < size; i++) {
shards.get(i).writeTo(out);
}
out.writeList(shards);
out.writeString(nodeId);
}
}
@ -566,18 +558,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
super.readFrom(in);
nodeId = in.readString();
totalShards = in.readVInt();
int resultsSize = in.readVInt();
results = new ArrayList<>(resultsSize);
for (; resultsSize > 0; resultsSize--) {
final ShardOperationResult result = in.readBoolean() ? readShardResult(in) : null;
results.add(result);
}
results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null);
if (in.readBoolean()) {
int failureShards = in.readVInt();
exceptions = new ArrayList<>(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.add(new BroadcastShardOperationFailedException(in));
}
exceptions = in.readList(BroadcastShardOperationFailedException::new);
} else {
exceptions = null;
}
@ -594,11 +577,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
}
out.writeBoolean(exceptions != null);
if (exceptions != null) {
int failureShards = exceptions.size();
out.writeVInt(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.get(i).writeTo(out);
}
out.writeList(exceptions);
}
}
}
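
The hand-rolled size-prefixed loops above collapse into writeList/readList, which pair a length prefix with one functional reader call per element. A standalone sketch of the same pattern over plain DataOutput/DataInput streams (ES's StreamInput/StreamOutput and vint encoding are replaced by assumptions):

---------------------------------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch of the length-prefixed list encoding behind writeList/readList.
final class ListWireSketch {
    interface Writer<T> { void write(DataOutputStream out, T value) throws IOException; }
    interface Reader<T> { T read(DataInputStream in) throws IOException; }

    static <T> void writeList(DataOutputStream out, List<T> list, Writer<T> writer) throws IOException {
        out.writeInt(list.size()); // length prefix (the real stream uses a vint)
        for (T value : list) {
            writer.write(out, value);
        }
    }

    static <T> List<T> readList(DataInputStream in, Reader<T> reader) throws IOException {
        int size = in.readInt();
        List<T> list = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            list.add(reader.read(in)); // one functional reader call per element
        }
        return list;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeList(new DataOutputStream(bytes), Arrays.asList("a", "b"), DataOutputStream::writeUTF);
        List<String> back = readList(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())),
                                     DataInputStream::readUTF);
        System.out.println(back); // [a, b]
    }
}
---------------------------------------------------------------------------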

View File

@ -34,7 +34,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@ -46,6 +45,7 @@ import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportService;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
@ -111,14 +111,6 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
private volatile ClusterStateObserver observer;
private final Task task;
private final ClusterStateObserver.ChangePredicate retryableOrNoBlockPredicate = new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterServiceState newState) {
ClusterBlockException blockException = checkBlock(request, newState.getClusterState());
return (blockException == null || !blockException.retryable());
}
};
AsyncSingleAction(Task task, Request request, ActionListener<Response> listener) {
this.task = task;
this.request = request;
@ -134,7 +126,8 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}
protected void doStart() {
final ClusterState clusterState = observer.observedState().getClusterState();
final ClusterState clusterState = observer.observedState();
final Predicate<ClusterState> masterChangePredicate = MasterNodeChangePredicate.build(clusterState);
final DiscoveryNodes nodes = clusterState.nodes();
if (nodes.isLocalNodeElectedMaster() || localExecute(request)) {
// check for block, if blocked, retry, else, execute locally
@ -144,7 +137,10 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
listener.onFailure(blockException);
} else {
logger.trace("can't execute due to a cluster block, retrying", blockException);
retry(blockException, retryableOrNoBlockPredicate);
retry(blockException, newState -> {
ClusterBlockException newException = checkBlock(request, newState);
return (newException == null || !newException.retryable());
});
}
} else {
ActionListener<Response> delegate = new ActionListener<Response>() {
@ -158,7 +154,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
retry(t, masterChangePredicate);
} else {
listener.onFailure(t);
}
@ -168,14 +164,14 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
threadPool.executor(executor).execute(new ActionRunnable(delegate) {
@Override
protected void doRun() throws Exception {
masterOperation(task, request, clusterService.state(), delegate);
masterOperation(task, request, clusterState, delegate);
}
});
}
} else {
if (nodes.getMasterNode() == null) {
logger.debug("no known master node, scheduling a retry");
retry(null, MasterNodeChangePredicate.INSTANCE);
retry(null, masterChangePredicate);
} else {
taskManager.registerChildTask(task, nodes.getMasterNode().getId());
transportService.sendRequest(nodes.getMasterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener, TransportMasterNodeAction.this::newResponse) {
@ -186,7 +182,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
// we want to retry here a bit to see if a new master is elected
logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
actionName, nodes.getMasterNode(), exp.getDetailedMessage());
retry(cause, MasterNodeChangePredicate.INSTANCE);
retry(cause, masterChangePredicate);
} else {
listener.onFailure(exp);
}
@ -196,7 +192,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}
}
private void retry(final Throwable failure, final ClusterStateObserver.ChangePredicate changePredicate) {
private void retry(final Throwable failure, final Predicate<ClusterState> statePredicate) {
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override
@ -214,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate
}, statePredicate
);
}
}
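
Swapping ClusterStateObserver.ChangePredicate for a plain java.util.function.Predicate<ClusterState> lets each retry site capture its context (the request, the last observed master) in a lambda instead of an anonymous subclass. A minimal sketch of the shape, with a toy state and observer standing in for the real ones:

---------------------------------------------------------------------------
import java.util.function.Predicate;

// Sketch of predicate-driven retries, as in TransportMasterNodeAction#retry.
final class PredicateRetrySketch {
    static final class ClusterState {
        final String masterNodeId;
        ClusterState(String masterNodeId) { this.masterNodeId = masterNodeId; }
    }

    // Toy observer: tests each new state against the predicate and fires on the first match.
    static void waitForNextChange(ClusterState[] updates, Predicate<ClusterState> predicate, Runnable onMatch) {
        for (ClusterState next : updates) {
            if (predicate.test(next)) {
                onMatch.run();
                return;
            }
        }
    }

    public static void main(String[] args) {
        ClusterState observed = new ClusterState(null); // no master elected yet
        // Like MasterNodeChangePredicate.build(observed): retry once a (new) master appears.
        Predicate<ClusterState> masterChange =
            next -> next.masterNodeId != null && !next.masterNodeId.equals(observed.masterNodeId);

        waitForNextChange(new ClusterState[] { new ClusterState(null), new ClusterState("node-1") },
            masterChange, () -> System.out.println("master elected, retrying"));
    }
}
---------------------------------------------------------------------------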

View File

@ -37,4 +37,9 @@ public class BasicReplicationRequest extends ReplicationRequest<BasicReplication
public BasicReplicationRequest(ShardId shardId) {
super(shardId);
}
@Override
public String toString() {
return "BasicReplicationRequest{" + shardId + "}";
}
}

View File

@ -19,12 +19,14 @@
package org.elasticsearch.action.support.replication;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
@ -36,6 +38,8 @@ import java.io.IOException;
public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>> extends ReplicationRequest<R> implements WriteRequest<R> {
private RefreshPolicy refreshPolicy = RefreshPolicy.NONE;
private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
/**
* Constructor for deserialization.
*/
@ -62,11 +66,32 @@ public abstract class ReplicatedWriteRequest<R extends ReplicatedWriteRequest<R>
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
refreshPolicy = RefreshPolicy.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNo = in.readZLong();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
refreshPolicy.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeZLong(seqNo);
}
}
/**
* Returns the sequence number for this operation. The sequence number is assigned while the operation
* is performed on the primary shard.
*/
public long getSeqNo() {
return seqNo;
}
/** Sets the sequence number for this operation. Should only be called on the primary shard. */
public void setSeqNo(long seqNo) {
this.seqNo = seqNo;
}
}
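The readFrom/writeTo gates above follow the standard wire-compatibility pattern: the new seqNo field is only exchanged when the remote endpoint speaks a version that knows about it, and readers substitute UNASSIGNED_SEQ_NO when talking to an older peer. A hedged, test-style round-trip sketch (variable names assumed) showing why the gate matters:

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_5_0_0);           // pretend the peer is a 5.x node
    request.writeTo(out);                      // the seqNo bytes are skipped for the old peer
    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_5_0_0);
    copy.readFrom(in);                         // seqNo falls back to UNASSIGNED_SEQ_NO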

View File

@ -283,7 +283,7 @@ public class ReplicationOperation<
}
private void decPendingAndFinishIfNeeded() {
assert pendingActions.get() > 0;
assert pendingActions.get() > 0 : "pending action count goes below 0 for request [" + request + "]";
if (pendingActions.decrementAndGet() == 0) {
finish();
}

View File

@ -55,7 +55,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
*/
protected ShardId shardId;
long seqNo;
long primaryTerm;
protected TimeValue timeout = DEFAULT_TIMEOUT;
@ -171,19 +170,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return routedBasedOnClusterVersion;
}
/**
* Returns the sequence number for this operation. The sequence number is assigned while the operation
* is performed on the primary shard.
*/
public long seqNo() {
return seqNo;
}
/** sets the sequence number for this operation. should only be called on the primary shard */
public void seqNo(long seqNo) {
this.seqNo = seqNo;
}
/** returns the primary term active at the time the operation was performed on the primary shard */
public long primaryTerm() {
return primaryTerm;
@ -215,7 +201,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
timeout = new TimeValue(in);
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
seqNo = in.readVLong();
primaryTerm = in.readVLong();
}
@ -232,7 +217,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
timeout.writeTo(out);
out.writeString(index);
out.writeVLong(routedBasedOnClusterVersion);
out.writeVLong(seqNo);
out.writeVLong(primaryTerm);
}
@ -252,13 +236,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
}
@Override
public String toString() {
if (shardId != null) {
return shardId.toString();
} else {
return index;
}
}
public abstract String toString(); // force subclasses to implement a proper toString to ease debugging
@Override
public String getDescription() {

View File

@ -23,17 +23,25 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
/**
* Base class for write action responses.
@ -66,6 +74,12 @@ public class ReplicationResponse extends ActionResponse {
public static class ShardInfo implements Streamable, ToXContent {
private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
private static final String FAILURES = "failures";
private int total;
private int successful;
private Failure[] failures = EMPTY;
@ -120,6 +134,25 @@ public class ReplicationResponse extends ActionResponse {
return status;
}
@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
ShardInfo other = (ShardInfo) that;
return Objects.equals(total, other.total) &&
Objects.equals(successful, other.successful) &&
Arrays.equals(failures, other.failures);
}
@Override
public int hashCode() {
// hash the array's contents so hashCode stays consistent with the Arrays.equals comparison in equals
return Objects.hash(total, successful, Arrays.hashCode(failures));
}
@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readVInt();
@ -145,12 +178,12 @@ public class ReplicationResponse extends ActionResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields._SHARDS);
builder.field(Fields.TOTAL, total);
builder.field(Fields.SUCCESSFUL, successful);
builder.field(Fields.FAILED, getFailed());
builder.startObject(_SHARDS);
builder.field(TOTAL, total);
builder.field(SUCCESSFUL, successful);
builder.field(FAILED, getFailed());
if (failures.length > 0) {
builder.startArray(Fields.FAILURES);
builder.startArray(FAILURES);
for (Failure failure : failures) {
failure.toXContent(builder, params);
}
@ -160,6 +193,48 @@ public class ReplicationResponse extends ActionResponse {
return builder;
}
public static ShardInfo fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
String currentFieldName = parser.currentName();
if (_SHARDS.equals(currentFieldName) == false) {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
token = parser.nextToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
int total = 0, successful = 0;
List<Failure> failuresList = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (TOTAL.equals(currentFieldName)) {
total = parser.intValue();
} else if (SUCCESSFUL.equals(currentFieldName)) {
successful = parser.intValue();
} else if (FAILED.equals(currentFieldName) == false) {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (FAILURES.equals(currentFieldName)) {
failuresList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
failuresList.add(Failure.fromXContent(parser));
}
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
}
}
Failure[] failures = EMPTY;
if (failuresList != null) {
failures = failuresList.toArray(new Failure[failuresList.size()]);
}
return new ShardInfo(total, successful, failures);
}
@Override
public String toString() {
return "ShardInfo{" +
@ -177,6 +252,13 @@ public class ReplicationResponse extends ActionResponse {
public static class Failure implements ShardOperationFailedException, ToXContent {
private static final String _INDEX = "_index";
private static final String _SHARD = "_shard";
private static final String _NODE = "_node";
private static final String REASON = "reason";
private static final String STATUS = "status";
private static final String PRIMARY = "primary";
private ShardId shardId;
private String nodeId;
private Exception cause;
@ -251,6 +333,27 @@ public class ReplicationResponse extends ActionResponse {
return primary;
}
@Override
public boolean equals(Object that) {
if (this == that) {
return true;
}
if (that == null || getClass() != that.getClass()) {
return false;
}
Failure failure = (Failure) that;
return Objects.equals(primary, failure.primary) &&
Objects.equals(shardId, failure.shardId) &&
Objects.equals(nodeId, failure.nodeId) &&
Objects.equals(cause, failure.cause) &&
Objects.equals(status, failure.status);
}
@Override
public int hashCode() {
return Objects.hash(shardId, nodeId, cause, status, primary);
}
@Override
public void readFrom(StreamInput in) throws IOException {
shardId = ShardId.readShardId(in);
@ -272,39 +375,57 @@ public class ReplicationResponse extends ActionResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields._INDEX, shardId.getIndexName());
builder.field(Fields._SHARD, shardId.id());
builder.field(Fields._NODE, nodeId);
builder.field(Fields.REASON);
builder.field(_INDEX, shardId.getIndexName());
builder.field(_SHARD, shardId.id());
builder.field(_NODE, nodeId);
builder.field(REASON);
builder.startObject();
ElasticsearchException.toXContent(builder, params, cause);
builder.endObject();
builder.field(Fields.STATUS, status);
builder.field(Fields.PRIMARY, primary);
builder.field(STATUS, status);
builder.field(PRIMARY, primary);
builder.endObject();
return builder;
}
private static class Fields {
public static Failure fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
private static final String _INDEX = "_index";
private static final String _SHARD = "_shard";
private static final String _NODE = "_node";
private static final String REASON = "reason";
private static final String STATUS = "status";
private static final String PRIMARY = "primary";
String shardIndex = null, nodeId = null;
int shardId = -1;
boolean primary = false;
RestStatus status = null;
ElasticsearchException reason = null;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (_INDEX.equals(currentFieldName)) {
shardIndex = parser.text();
} else if (_SHARD.equals(currentFieldName)) {
shardId = parser.intValue();
} else if (_NODE.equals(currentFieldName)) {
nodeId = parser.text();
} else if (STATUS.equals(currentFieldName)) {
status = RestStatus.valueOf(parser.text());
} else if (PRIMARY.equals(currentFieldName)) {
primary = parser.booleanValue();
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (REASON.equals(currentFieldName)) {
reason = ElasticsearchException.fromXContent(parser);
} else {
throwUnknownField(currentFieldName, parser.getTokenLocation());
}
}
}
return new Failure(new ShardId(shardIndex, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), nodeId, reason, status, primary);
}
}
private static class Fields {
private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
private static final String FAILURES = "failures";
}
}
}
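The new fromXContent methods make ShardInfo and Failure round-trippable with their toXContent output, so callers (e.g. REST-layer code or tests) can rebuild responses from JSON. A minimal sketch of driving the parser, assuming the 5.x XContent.createParser(String) signature:

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            "{\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0}}")) {
        parser.nextToken(); // position on the enclosing START_OBJECT
        ReplicationResponse.ShardInfo info = ReplicationResponse.ShardInfo.fromXContent(parser);
    }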

View File

@ -21,6 +21,7 @@ package org.elasticsearch.action.support.replication;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;
@ -43,6 +44,7 @@ import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
@ -57,6 +59,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
@ -182,17 +185,19 @@ public abstract class TransportReplicationAction<
protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest, IndexShard replica) throws Exception;
/**
* Cluster level block to check before request execution
* Cluster level block to check before request execution. Returning null means that no blocks need to be checked.
*/
@Nullable
protected ClusterBlockLevel globalBlockLevel() {
return ClusterBlockLevel.WRITE;
return null;
}
/**
* Index level block to check before request execution
* Index level block to check before request execution. Returning null means that no blocks need to be checked.
*/
@Nullable
protected ClusterBlockLevel indexBlockLevel() {
return ClusterBlockLevel.WRITE;
return null;
}
/**
@ -630,7 +635,7 @@ public abstract class TransportReplicationAction<
@Override
protected void doRun() {
setPhase(task, "routing");
final ClusterState state = observer.observedState().getClusterState();
final ClusterState state = observer.observedState();
if (handleBlockExceptions(state)) {
return;
}
@ -642,6 +647,9 @@ public abstract class TransportReplicationAction<
retry(new IndexNotFoundException(concreteIndex));
return;
}
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
throw new IndexClosedException(indexMetaData.getIndex());
}
// resolve all derived request fields, so we can route and apply it
resolveRequest(state.metaData(), indexMetaData, request);
@ -664,7 +672,7 @@ public abstract class TransportReplicationAction<
private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
setPhase(task, "waiting_on_primary");
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
logger.trace("send action [{}] to local primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
}
performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId()));
@ -718,15 +726,21 @@ public abstract class TransportReplicationAction<
}
private boolean handleBlockExceptions(ClusterState state) {
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel());
if (blockException != null) {
handleBlockException(blockException);
return true;
ClusterBlockLevel globalBlockLevel = globalBlockLevel();
if (globalBlockLevel != null) {
ClusterBlockException blockException = state.blocks().globalBlockedException(globalBlockLevel);
if (blockException != null) {
handleBlockException(blockException);
return true;
}
}
blockException = state.blocks().indexBlockedException(indexBlockLevel(), concreteIndex(state));
if (blockException != null) {
handleBlockException(blockException);
return true;
ClusterBlockLevel indexBlockLevel = indexBlockLevel();
if (indexBlockLevel != null) {
ClusterBlockException blockException = state.blocks().indexBlockedException(indexBlockLevel, concreteIndex(state));
if (blockException != null) {
handleBlockException(blockException);
return true;
}
}
return false;
}
@ -950,6 +964,8 @@ public abstract class TransportReplicationAction<
public PrimaryResult perform(Request request) throws Exception {
PrimaryResult result = shardOperationOnPrimary(request, indexShard);
if (result.replicaRequest() != null) {
assert result.finalFailure == null : "a replica request [" + result.replicaRequest()
+ "] with a primary failure [" + result.finalFailure + "]";
result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm());
}
return result;
@ -983,16 +999,25 @@ public abstract class TransportReplicationAction<
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
localCheckpoint = in.readZLong();
allocationId = in.readString();
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
super.readFrom(in);
localCheckpoint = in.readZLong();
allocationId = in.readString();
} else {
// 5.x nodes send empty responses, which carry nothing on the stream, so there is nothing to read.
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeZLong(localCheckpoint);
out.writeString(allocationId);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
super.writeTo(out);
out.writeZLong(localCheckpoint);
out.writeString(allocationId);
} else {
// we used to send empty responses; keep writing one for old peers
Empty.INSTANCE.writeTo(out);
}
}
@Override
@ -1016,10 +1041,9 @@ public abstract class TransportReplicationAction<
listener.onFailure(new NoNodeAvailableException("unknown node [" + nodeId + "]"));
return;
}
transportService.sendRequest(node, transportReplicaAction,
new ConcreteShardRequest<>(request, replica.allocationId().getId()), transportOptions,
// Eclipse can't handle when this is <> so we specify the type here.
new ActionListenerResponseHandler<ReplicaResponse>(listener, ReplicaResponse::new));
final ConcreteShardRequest<ReplicaRequest> concreteShardRequest =
new ConcreteShardRequest<>(request, replica.allocationId().getId());
sendReplicaRequest(concreteShardRequest, node, listener);
}
@Override
@ -1060,6 +1084,14 @@ public abstract class TransportReplicationAction<
}
}
/** Sends the given replica request to the supplied node. */
protected void sendReplicaRequest(ConcreteShardRequest<ReplicaRequest> concreteShardRequest, DiscoveryNode node,
ActionListener<ReplicationOperation.ReplicaResponse> listener) {
transportService.sendRequest(node, transportReplicaAction, concreteShardRequest, transportOptions,
// Eclipse can't handle when this is <> so we specify the type here.
new ActionListenerResponseHandler<ReplicaResponse>(listener, ReplicaResponse::new));
}
/** a wrapper class to encapsulate a request when being sent to a specific allocation id **/
public static final class ConcreteShardRequest<R extends TransportRequest> extends TransportRequest {
@ -1124,6 +1156,11 @@ public abstract class TransportReplicationAction<
public String getTargetAllocationID() {
return targetAllocationID;
}
@Override
public String toString() {
return "request: " + request + ", target allocation id: " + targetAllocationID;
}
}
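Extracting sendReplicaRequest into a protected method turns the replica fan-out into an extension point: a subclass can observe or adjust the outgoing request before delegating to the default transport send. A hypothetical override, purely illustrative:

    @Override
    protected void sendReplicaRequest(ConcreteShardRequest<ReplicaRequest> concreteShardRequest,
                                      DiscoveryNode node,
                                      ActionListener<ReplicationOperation.ReplicaResponse> listener) {
        logger.trace("sending replica request [{}] to [{}]", concreteShardRequest, node.getId());
        super.sendReplicaRequest(concreteShardRequest, node, listener);
    }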
/**

View File

@ -25,6 +25,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
@ -184,6 +185,16 @@ public abstract class TransportWriteAction<
}
}
@Override
protected ClusterBlockLevel globalBlockLevel() {
return ClusterBlockLevel.WRITE;
}
@Override
protected ClusterBlockLevel indexBlockLevel() {
return ClusterBlockLevel.WRITE;
}
/**
* callback used by {@link AsyncAfterWriteAction} to notify that all post
* process actions have been executed

View File

@ -39,12 +39,12 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
@ -124,7 +124,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
}
protected void doStart() {
final ClusterState clusterState = observer.observedState().getClusterState();
final ClusterState clusterState = observer.observedState();
nodes = clusterState.nodes();
try {
ClusterBlockException blockException = checkGlobalBlock(clusterState);

View File

@ -26,10 +26,8 @@ import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
@ -88,43 +86,41 @@ public class MultiTermVectorsRequest extends ActionRequest implements Iterable<T
return requests;
}
public void add(TermVectorsRequest template, BytesReference data) throws IOException {
public void add(TermVectorsRequest template, @Nullable XContentParser parser) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
if (data.length() > 0) {
try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("docs array element should include an object");
}
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
TermVectorsRequest.parseRequest(termVectorsRequest, parser);
add(termVectorsRequest);
if (parser != null) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("docs array element should include an object");
}
} else if ("ids".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
ids.add(parser.text());
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
TermVectorsRequest.parseRequest(termVectorsRequest, parser);
add(termVectorsRequest);
}
} else if ("ids".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type ARRAY", currentFieldName);
ids.add(parser.text());
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if ("parameters".equals(currentFieldName)) {
TermVectorsRequest.parseRequest(template, parser);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type OBJECT", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ElasticsearchParseException("_mtermvectors: Parameter [{}] not supported", currentFieldName);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type ARRAY", currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if ("parameters".equals(currentFieldName)) {
TermVectorsRequest.parseRequest(template, parser);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type OBJECT", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ElasticsearchParseException("_mtermvectors: Parameter [{}] not supported", currentFieldName);
}
}
}
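The add(...) signature change pushes parser construction out to the caller (e.g. the REST layer) and makes the empty-body case explicit via a null parser. A minimal sketch of the new calling convention, again assuming the 5.x XContent.createParser(String) signature:

    MultiTermVectorsRequest request = new MultiTermVectorsRequest();
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            "{\"ids\":[\"1\",\"2\"]}")) {
        request.add(new TermVectorsRequest("index", "type", null), parser);
    }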

View File

@ -71,7 +71,7 @@ final class TermVectorsWriter {
// if no terms found, take the retrieved term vector fields for stats
if (topLevelTerms == null) {
topLevelTerms = fieldTermVector;
topLevelTerms = EMPTY_TERMS;
}
TermsEnum topLevelIterator = topLevelTerms.iterator();
@ -292,4 +292,18 @@ final class TermVectorsWriter {
// further...
output.writeVLong(Math.max(0, value + 1));
}
/** Implements an empty {@link Terms}. */
private static final Terms EMPTY_TERMS = new Terms() {
@Override public TermsEnum iterator() throws IOException { return TermsEnum.EMPTY; }
@Override public long size() throws IOException { return 0; }
@Override public long getSumTotalTermFreq() throws IOException { return 0; }
@Override public long getSumDocFreq() throws IOException { return 0; }
@Override public int getDocCount() throws IOException { return 0; }
@Override public boolean hasFreqs() { return false; }
@Override public boolean hasOffsets() { return false; }
@Override public boolean hasPositions() { return false; }
@Override public boolean hasPayloads() { return false; }
};
}
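The fallback now uses an explicit empty Terms implementation instead of the field's own term vector; its iterator is TermsEnum.EMPTY, so downstream stats consumers can iterate unconditionally:

    TermsEnum iterator = EMPTY_TERMS.iterator();
    assert iterator.next() == null; // no terms: consuming loops simply never execute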

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@ -55,6 +56,13 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
@Override
protected ShardIterator shards(ClusterState state, InternalRequest request) {
if (request.request().doc() != null && request.request().routing() == null) {
// artificial document without routing specified: ignore its "id" and pick a shard either at random or according to the preference
GroupShardsIterator groupShardsIter = clusterService.operationRouting().searchShards(state,
new String[] { request.concreteIndex() }, null, request.request().preference());
return groupShardsIter.iterator().next();
}
return clusterService.operationRouting().getShards(state, request.concreteIndex(), request.request().id(),
request.request().routing(), request.request().preference());
}
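The new branch means an "artificial" document (one supplied inline, with no stored id and no routing) is no longer routed by a meaningless id; shard selection instead follows the search preference. Illustrative client-side usage that exercises this path (a sketch using the standard builder API):

    TermVectorsRequest request = new TermVectorsRequest("index", "type", null)
            .doc(XContentFactory.jsonBuilder().startObject().field("text", "some content").endObject());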

View File

@ -28,8 +28,6 @@ import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
@ -689,18 +687,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return upsertRequest;
}
public UpdateRequest fromXContent(XContentBuilder source) throws Exception {
return fromXContent(source.bytes());
}
public UpdateRequest fromXContent(byte[] source) throws Exception {
return fromXContent(source, 0, source.length);
}
public UpdateRequest fromXContent(byte[] source, int offset, int length) throws Exception {
return fromXContent(new BytesArray(source, offset, length));
}
/**
* Should this update attempt to detect if it is a noop? Defaults to true.
* @return this for chaining
@ -717,52 +703,48 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return detectNoop;
}
public UpdateRequest fromXContent(BytesReference source) throws IOException {
public UpdateRequest fromXContent(XContentParser parser) throws IOException {
Script script = null;
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
XContentParser.Token token = parser.nextToken();
if (token == null) {
return this;
}
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("script".equals(currentFieldName)) {
script = Script.parse(parser, ParseFieldMatcher.EMPTY);
} else if ("scripted_upsert".equals(currentFieldName)) {
scriptedUpsert = parser.booleanValue();
} else if ("upsert".equals(currentFieldName)) {
XContentType xContentType = XContentFactory.xContentType(source);
XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
builder.copyCurrentStructure(parser);
safeUpsertRequest().source(builder);
} else if ("doc".equals(currentFieldName)) {
XContentType xContentType = XContentFactory.xContentType(source);
XContentBuilder docBuilder = XContentFactory.contentBuilder(xContentType);
docBuilder.copyCurrentStructure(parser);
safeDoc().source(docBuilder);
} else if ("doc_as_upsert".equals(currentFieldName)) {
docAsUpsert(parser.booleanValue());
} else if ("detect_noop".equals(currentFieldName)) {
detectNoop(parser.booleanValue());
} else if ("fields".equals(currentFieldName)) {
List<Object> fields = null;
if (token == XContentParser.Token.START_ARRAY) {
fields = (List) parser.list();
} else if (token.isValue()) {
fields = Collections.singletonList(parser.text());
}
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
XContentParser.Token token = parser.nextToken();
if (token == null) {
return this;
}
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("script".equals(currentFieldName)) {
script = Script.parse(parser, ParseFieldMatcher.EMPTY);
} else if ("scripted_upsert".equals(currentFieldName)) {
scriptedUpsert = parser.booleanValue();
} else if ("upsert".equals(currentFieldName)) {
XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
builder.copyCurrentStructure(parser);
safeUpsertRequest().source(builder);
} else if ("doc".equals(currentFieldName)) {
XContentBuilder docBuilder = XContentFactory.contentBuilder(parser.contentType());
docBuilder.copyCurrentStructure(parser);
safeDoc().source(docBuilder);
} else if ("doc_as_upsert".equals(currentFieldName)) {
docAsUpsert(parser.booleanValue());
} else if ("detect_noop".equals(currentFieldName)) {
detectNoop(parser.booleanValue());
} else if ("fields".equals(currentFieldName)) {
List<Object> fields = null;
if (token == XContentParser.Token.START_ARRAY) {
fields = (List) parser.list();
} else if (token.isValue()) {
fields = Collections.singletonList(parser.text());
}
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
}
if (script != null) {
this.script = script;
}
}
if (script != null) {
this.script = script;
}
return this;
}
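With the BytesReference overloads gone, callers own the parser lifecycle and fromXContent only consumes tokens. A minimal sketch of the new contract, assuming the 5.x XContent.createParser(String) signature:

    UpdateRequest update = new UpdateRequest("index", "type", "1");
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            "{\"doc\":{\"field\":\"value\"},\"detect_noop\":true}")) {
        update.fromXContent(parser);
    }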

View File

@ -30,11 +30,13 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
@ -55,7 +57,8 @@ import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.security.NoSuchAlgorithmException;
import java.util.Map;
import java.util.List;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
/**
@ -92,7 +95,7 @@ final class Bootstrap {
}
/** initialize native resources */
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean systemCallFilter, boolean ctrlHandler) {
final Logger logger = Loggers.getLogger(Bootstrap.class);
// check if the user is running as root, and bail
@ -100,9 +103,9 @@ final class Bootstrap {
throw new RuntimeException("can not run elasticsearch as root");
}
// enable secure computing mode
if (seccomp) {
Natives.trySeccomp(tmpFile);
// enable system call filter
if (systemCallFilter) {
Natives.tryInstallSystemCallFilter(tmpFile);
}
// mlockall if requested
@ -176,7 +179,7 @@ final class Bootstrap {
initializeNatives(
environment.tmpFile(),
BootstrapSettings.MEMORY_LOCK_SETTING.get(settings),
BootstrapSettings.SECCOMP_SETTING.get(settings),
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings),
BootstrapSettings.CTRLHANDLER_SETTING.get(settings));
// initialize probes before the security manager is installed
@ -215,19 +218,20 @@ final class Bootstrap {
@Override
protected void validateNodeBeforeAcceptingRequests(
final Settings settings,
final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
BootstrapCheck.check(settings, boundTransportAddress);
final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> checks) throws NodeValidationException {
BootstrapChecks.check(settings, boundTransportAddress, checks);
}
};
}
private static Environment initialEnvironment(boolean foreground, Path pidFile, Map<String, String> esSettings) {
private static Environment initialEnvironment(boolean foreground, Path pidFile, Settings initialSettings) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
Settings.Builder builder = Settings.builder();
if (pidFile != null) {
builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
}
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
builder.put(initialSettings);
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap());
}
private void start() throws NodeValidationException {
@ -257,7 +261,7 @@ final class Bootstrap {
final boolean foreground,
final Path pidFile,
final boolean quiet,
final Map<String, String> esSettings) throws BootstrapException, NodeValidationException, UserException {
final Settings initialSettings) throws BootstrapException, NodeValidationException, UserException {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
@ -267,7 +271,7 @@ final class Bootstrap {
INSTANCE = new Bootstrap();
Environment environment = initialEnvironment(foreground, pidFile, esSettings);
Environment environment = initialEnvironment(foreground, pidFile, initialSettings);
try {
LogConfigurator.configure(environment);
} catch (IOException e) {
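The seccomp naming is generalized to "system call filter" since the same class of mechanism covers more than Linux seccomp (e.g. seatbelt on macOS). Hedged sketch of the renamed setting; the key is assumed from the new constant name, with the old seccomp key presumably kept as a fallback:

    Settings settings = Settings.builder()
            .put("bootstrap.system_call_filter", true)
            .build();
    boolean enabled = BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings);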

View File

@ -19,582 +19,27 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is
* running in production and all bootstrap checks must pass.
* Encapsulates a bootstrap check.
*/
final class BootstrapCheck {
private BootstrapCheck() {
}
public interface BootstrapCheck {
/**
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface.
* Test if the node fails the check.
*
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
* @return {@code true} if the node failed the check
*/
static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
check(
enforceLimits(boundTransportAddress),
checks(settings),
Node.NODE_NAME_SETTING.get(settings));
}
boolean check();
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
* The error message for a failed check.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
* @return the error message on check failure
*/
static void check(
final boolean enforceLimits,
final List<Check> checks,
final String nodeName) throws NodeValidationException {
check(enforceLimits, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param logger the logger to which check failures are logged
*/
static void check(
final boolean enforceLimits,
final List<Check> checks,
final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
for (final Check check : checks) {
if (check.check()) {
if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
}
}
}
if (!ignoredErrors.isEmpty()) {
ignoredErrors.forEach(error -> log(logger, error));
}
if (!errors.isEmpty()) {
final List<String> messages = new ArrayList<>(1 + errors.size());
messages.add("bootstrap checks failed");
messages.addAll(errors);
final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
throw ne;
}
}
static void log(final Logger logger, final String error) {
logger.warn(error);
}
/**
* Tests if the checks should be enforced.
*
* @param boundTransportAddress the node network bindings
* @return {@code true} if the checks should be enforced
*/
static boolean enforceLimits(BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress()
|| t.address().getAddress().isLoopbackAddress();
return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
}
// the list of checks to execute
static List<Check> checks(final Settings settings) {
final List<Check> checks = new ArrayList<>();
checks.add(new HeapSizeCheck());
final FileDescriptorCheck fileDescriptorCheck
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
if (Constants.LINUX) {
checks.add(new MaxMapCountCheck());
}
checks.add(new ClientJvmCheck());
checks.add(new UseSerialGCCheck());
checks.add(new OnErrorCheck());
checks.add(new OnOutOfMemoryErrorCheck());
checks.add(new G1GCCheck());
return Collections.unmodifiableList(checks);
}
/**
* Encapsulates a bootstrap check.
*/
interface Check {
/**
* Test if the node fails the check.
*
* @return {@code true} if the node failed the check
*/
boolean check();
/**
* The error message for a failed check.
*
* @return the error message on check failure
*/
String errorMessage();
default boolean alwaysEnforce() {
return false;
}
}
static class HeapSizeCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
final long initialHeapSize = getInitialHeapSize();
final long maxHeapSize = getMaxHeapSize();
return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"initial heap size [%d] not equal to maximum heap size [%d]; " +
"this can cause resize pauses and prevents mlockall from locking the entire heap",
getInitialHeapSize(),
getMaxHeapSize()
);
}
// visible for testing
long getInitialHeapSize() {
return JvmInfo.jvmInfo().getConfiguredInitialHeapSize();
}
// visible for testing
long getMaxHeapSize() {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}
}
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
public OsXFileDescriptorCheck() {
// see constant OPEN_MAX defined in
// /usr/include/sys/syslimits.h on OS X and its use in JVM
// initialization in int os:init_2(void) defined in the JVM
// code for BSD (contains OS X)
super(10240);
}
}
static class FileDescriptorCheck implements Check {
private final int limit;
FileDescriptorCheck() {
this(1 << 16);
}
protected FileDescriptorCheck(final int limit) {
if (limit <= 0) {
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
}
this.limit = limit;
}
public final boolean check() {
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
}
@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}
// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}
}
static class MlockallCheck implements Check {
private final boolean mlockallSet;
public MlockallCheck(final boolean mlockAllSet) {
this.mlockallSet = mlockAllSet;
}
@Override
public boolean check() {
return mlockallSet && !isMemoryLocked();
}
@Override
public String errorMessage() {
return "memory locking requested for elasticsearch process but memory is not locked";
}
// visible for testing
boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
static class MaxNumberOfThreadsCheck implements Check {
// this should be plenty for machines up to 256 cores
private final long maxNumberOfThreadsThreshold = 1 << 12;
@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] is too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}
// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
}
static class MaxSizeVirtualMemoryCheck implements Check {
@Override
public boolean check() {
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]",
getMaxSizeVirtualMemory(),
BootstrapInfo.getSystemProperties().get("user.name"));
}
// visible for testing
long getRlimInfinity() {
return JNACLibrary.RLIM_INFINITY;
}
// visible for testing
long getMaxSizeVirtualMemory() {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}
}
static class MaxMapCountCheck implements Check {
private final long limit = 1 << 18;
@Override
public boolean check() {
return getMaxMapCount() != -1 && getMaxMapCount() < limit;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
getMaxMapCount(),
limit);
}
// visible for testing
long getMaxMapCount() {
return getMaxMapCount(Loggers.getLogger(BootstrapCheck.class));
}
// visible for testing
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
if (rawProcSysVmMaxMapCount != null) {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}
// visible for testing
BufferedReader getBufferedReader(final Path path) throws IOException {
return Files.newBufferedReader(path);
}
// visible for testing
String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}
// visible for testing
long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}
}
static class ClientJvmCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return getVmName().toLowerCase(Locale.ROOT).contains("client");
}
// visible for testing
String getVmName() {
return JvmInfo.jvmInfo().getVmName();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the client VM [%s] but should be using a server VM for the best performance",
getVmName());
}
}
/**
* Checks if the serial collector is in use. This collector is single-threaded and devastating
* for performance and should not be used for a server application like Elasticsearch.
*/
static class UseSerialGCCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return getUseSerialGC().equals("true");
}
// visible for testing
String getUseSerialGC() {
return JvmInfo.jvmInfo().useSerialGC();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the serial collector but should not be for the best performance; " +
"either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified",
JvmInfo.jvmInfo().getVmName());
}
}
abstract static class MightForkCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return isSeccompInstalled() && mightFork();
}
// visible for testing
boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
// visible for testing
abstract boolean mightFork();
@Override
public final boolean alwaysEnforce() {
return true;
}
}
static class OnErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onError = onError();
return onError != null && !onError.equals("");
}
// visible for testing
String onError() {
return JvmInfo.jvmInfo().onError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
static class OnOutOfMemoryErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onOutOfMemoryError = onOutOfMemoryError();
return onOutOfMemoryError != null && !onOutOfMemoryError.equals("");
}
// visible for testing
String onOutOfMemoryError() {
return JvmInfo.jvmInfo().onOutOfMemoryError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onOutOfMemoryError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
/**
* Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
*/
static class G1GCCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) {
final String jvmVersion = jvmVersion();
// HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223
final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
final Matcher matcher = pattern.matcher(jvmVersion);
final boolean matches = matcher.matches();
assert matches : jvmVersion;
final int major = Integer.parseInt(matcher.group(1));
final int update = Integer.parseInt(matcher.group(2));
// HotSpot versions for Java 8 have major version 25, the bad versions are all versions prior to update 40
return major == 25 && update < 40;
} else {
return false;
}
}
// visible for testing
String jvmVendor() {
return Constants.JVM_VENDOR;
}
// visible for testing
boolean isG1GCEnabled() {
assert "Oracle Corporation".equals(jvmVendor());
return JvmInfo.jvmInfo().useG1GC().equals("true");
}
// visible for testing
String jvmVersion() {
assert "Oracle Corporation".equals(jvmVendor());
return Constants.JVM_VERSION;
}
// visible for testing
boolean isJava8() {
assert "Oracle Corporation".equals(jvmVendor());
return JavaVersion.current().equals(JavaVersion.parse("1.8"));
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion());
}
String errorMessage();
default boolean alwaysEnforce() {
return false;
}
}
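BootstrapCheck is now a standalone interface, which is what lets Node pass additional checks (for example from plugins) into validateNodeBeforeAcceptingRequests above. A hypothetical custom check, sketch only:

    public class ExampleDiskSpaceCheck implements BootstrapCheck {
        @Override
        public boolean check() {
            // fail the check when the root filesystem has less than ~1 GiB usable
            return new java.io.File("/").getUsableSpace() < (1L << 30);
        }
        @Override
        public String errorMessage() {
            return "less than 1GiB of usable disk space on the root filesystem";
        }
    }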

View File

@ -0,0 +1,609 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is
* running in production and all bootstrap checks must pass.
*/
final class BootstrapChecks {
private BootstrapChecks() {
}
/**
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface.
*
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
* @param additionalChecks additional checks to execute together with the built-in checks
*/
static void check(final Settings settings, final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> additionalChecks)
throws NodeValidationException {
final List<BootstrapCheck> builtInChecks = checks(settings);
final List<BootstrapCheck> combinedChecks = new ArrayList<>(builtInChecks);
combinedChecks.addAll(additionalChecks);
check(
enforceLimits(boundTransportAddress),
Collections.unmodifiableList(combinedChecks),
Node.NODE_NAME_SETTING.get(settings));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
*/
static void check(
final boolean enforceLimits,
final List<BootstrapCheck> checks,
final String nodeName) throws NodeValidationException {
check(enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class, nodeName));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param logger the logger to which check failures are logged
*/
static void check(
final boolean enforceLimits,
final List<BootstrapCheck> checks,
final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
for (final BootstrapCheck check : checks) {
if (check.check()) {
if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
}
}
}
if (!ignoredErrors.isEmpty()) {
ignoredErrors.forEach(error -> log(logger, error));
}
if (!errors.isEmpty()) {
final List<String> messages = new ArrayList<>(1 + errors.size());
messages.add("bootstrap checks failed");
messages.addAll(errors);
final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
throw ne;
}
}
static void log(final Logger logger, final String error) {
logger.warn(error);
}
/**
* Tests if the checks should be enforced.
*
* @param boundTransportAddress the node network bindings
* @return {@code true} if the checks should be enforced
*/
static boolean enforceLimits(BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress()
|| t.address().getAddress().isLoopbackAddress();
return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
}
// the list of checks to execute
static List<BootstrapCheck> checks(final Settings settings) {
final List<BootstrapCheck> checks = new ArrayList<>();
checks.add(new HeapSizeCheck());
final FileDescriptorCheck fileDescriptorCheck
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
if (Constants.LINUX) {
checks.add(new MaxMapCountCheck());
}
checks.add(new ClientJvmCheck());
checks.add(new UseSerialGCCheck());
checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings)));
checks.add(new OnErrorCheck());
checks.add(new OnOutOfMemoryErrorCheck());
checks.add(new G1GCCheck());
return Collections.unmodifiableList(checks);
}
static class HeapSizeCheck implements BootstrapCheck {
@Override
public boolean check() {
final long initialHeapSize = getInitialHeapSize();
final long maxHeapSize = getMaxHeapSize();
return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"initial heap size [%d] not equal to maximum heap size [%d]; " +
"this can cause resize pauses and prevents mlockall from locking the entire heap",
getInitialHeapSize(),
getMaxHeapSize()
);
}
// visible for testing
long getInitialHeapSize() {
return JvmInfo.jvmInfo().getConfiguredInitialHeapSize();
}
// visible for testing
long getMaxHeapSize() {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}
}
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
public OsXFileDescriptorCheck() {
// see constant OPEN_MAX defined in
// /usr/include/sys/syslimits.h on OS X and its use in JVM
// initialization in int os:init_2(void) defined in the JVM
// code for BSD (contains OS X)
super(10240);
}
}
static class FileDescriptorCheck implements BootstrapCheck {
private final int limit;
FileDescriptorCheck() {
this(1 << 16);
}
protected FileDescriptorCheck(final int limit) {
if (limit <= 0) {
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
}
this.limit = limit;
}
public final boolean check() {
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
}
@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}
// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}
}
static class MlockallCheck implements BootstrapCheck {
private final boolean mlockallSet;
public MlockallCheck(final boolean mlockAllSet) {
this.mlockallSet = mlockAllSet;
}
@Override
public boolean check() {
return mlockallSet && !isMemoryLocked();
}
@Override
public String errorMessage() {
return "memory locking requested for elasticsearch process but memory is not locked";
}
// visible for testing
boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
static class MaxNumberOfThreadsCheck implements BootstrapCheck {
// this should be plenty for machines up to 256 cores
private final long maxNumberOfThreadsThreshold = 1 << 12;
@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] is too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}
// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
}
static class MaxSizeVirtualMemoryCheck implements BootstrapCheck {
@Override
public boolean check() {
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]",
getMaxSizeVirtualMemory(),
BootstrapInfo.getSystemProperties().get("user.name"));
}
// visible for testing
long getRlimInfinity() {
return JNACLibrary.RLIM_INFINITY;
}
// visible for testing
long getMaxSizeVirtualMemory() {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}
}
static class MaxMapCountCheck implements BootstrapCheck {
private final long limit = 1 << 18;
@Override
public boolean check() {
return getMaxMapCount() != -1 && getMaxMapCount() < limit;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
getMaxMapCount(),
limit);
}
// visible for testing
long getMaxMapCount() {
return getMaxMapCount(Loggers.getLogger(BootstrapChecks.class));
}
// visible for testing
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
if (rawProcSysVmMaxMapCount != null) {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}
// visible for testing
BufferedReader getBufferedReader(final Path path) throws IOException {
return Files.newBufferedReader(path);
}
// visible for testing
String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}
// visible for testing
long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}
}
static class ClientJvmCheck implements BootstrapCheck {
@Override
public boolean check() {
return getVmName().toLowerCase(Locale.ROOT).contains("client");
}
// visible for testing
String getVmName() {
return JvmInfo.jvmInfo().getVmName();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the client VM [%s] but should be using a server VM for the best performance",
getVmName());
}
}
/**
* Checks if the serial collector is in use. This collector is single-threaded and devastating
* for performance and should not be used for a server application like Elasticsearch.
*/
static class UseSerialGCCheck implements BootstrapCheck {
@Override
public boolean check() {
return getUseSerialGC().equals("true");
}
// visible for testing
String getUseSerialGC() {
return JvmInfo.jvmInfo().useSerialGC();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the serial collector but should not be for the best performance; " +
"either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified",
JvmInfo.jvmInfo().getVmName());
}
}
/**
* Bootstrap check that if system call filters are enabled, then system call filters must have installed successfully.
*/
static class SystemCallFilterCheck implements BootstrapCheck {
private final boolean areSystemCallFiltersEnabled;
SystemCallFilterCheck(final boolean areSystemCallFiltersEnabled) {
this.areSystemCallFiltersEnabled = areSystemCallFiltersEnabled;
}
@Override
public boolean check() {
return areSystemCallFiltersEnabled && !isSystemCallFilterInstalled();
}
// visible for testing
boolean isSystemCallFilterInstalled() {
return Natives.isSystemCallFilterInstalled();
}
@Override
public String errorMessage() {
return "system call filters failed to install; " +
"check the logs and fix your configuration or disable system call filters at your own risk";
}
}
abstract static class MightForkCheck implements BootstrapCheck {
@Override
public boolean check() {
return isSystemCallFilterInstalled() && mightFork();
}
// visible for testing
boolean isSystemCallFilterInstalled() {
return Natives.isSystemCallFilterInstalled();
}
// visible for testing
abstract boolean mightFork();
@Override
public final boolean alwaysEnforce() {
return true;
}
}
static class OnErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onError = onError();
return onError != null && !onError.equals("");
}
// visible for testing
String onError() {
return JvmInfo.jvmInfo().onError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onError(),
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey());
}
}
static class OnOutOfMemoryErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onOutOfMemoryError = onOutOfMemoryError();
return onOutOfMemoryError != null && !onOutOfMemoryError.equals("");
}
// visible for testing
String onOutOfMemoryError() {
return JvmInfo.jvmInfo().onOutOfMemoryError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onOutOfMemoryError(),
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey());
}
}
/**
* Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
*/
static class G1GCCheck implements BootstrapCheck {
@Override
public boolean check() {
if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) {
final String jvmVersion = jvmVersion();
// HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223
final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
final Matcher matcher = pattern.matcher(jvmVersion);
final boolean matches = matcher.matches();
assert matches : jvmVersion;
final int major = Integer.parseInt(matcher.group(1));
final int update = Integer.parseInt(matcher.group(2));
// HotSpot versions for Java 8 have major version 25; the bad versions are all versions prior to update 40
return major == 25 && update < 40;
} else {
return false;
}
}
// visible for testing
String jvmVendor() {
return Constants.JVM_VENDOR;
}
// visible for testing
boolean isG1GCEnabled() {
assert "Oracle Corporation".equals(jvmVendor());
return JvmInfo.jvmInfo().useG1GC().equals("true");
}
// visible for testing
String jvmVersion() {
assert "Oracle Corporation".equals(jvmVendor());
return Constants.JVM_VERSION;
}
// visible for testing
boolean isJava8() {
assert "Oracle Corporation".equals(jvmVendor());
return JavaVersion.current().equals(JavaVersion.parse("1.8"));
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion());
}
}
}
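For illustration, a plugin could feed its own check through the additionalChecks parameter of BootstrapChecks.check above. The following is a minimal sketch, assuming only the BootstrapCheck methods the built-in checks use (check(), errorMessage(), and an optional alwaysEnforce()); the disk-space threshold and helper are hypothetical.

static class ExampleFreeDiskSpaceCheck implements BootstrapCheck {
    // hypothetical floor of 1 GiB of usable disk space
    private final long minFreeBytes = 1L << 30;

    @Override
    public boolean check() {
        // as with the built-in checks, returning true means the check failed
        return getFreeBytes() < minFreeBytes;
    }

    @Override
    public String errorMessage() {
        return "free disk space [" + getFreeBytes() + "] is too low, increase to at least [" + minFreeBytes + "]";
    }

    // visible for testing
    long getFreeBytes() {
        return new java.io.File(".").getUsableSpace();
    }
}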
View File
@ -20,14 +20,14 @@
package org.elasticsearch.bootstrap;
import java.nio.file.Path;
import java.util.Map;
/**
* Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked
* during bootstrap should explicitly declare the checked exceptions that they can throw, rather
* than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
* these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to
* declare all of these checked exceptions.
* these checked exceptions so that
* {@link Bootstrap#init(boolean, Path, boolean, org.elasticsearch.common.settings.Settings)}
* does not have to declare all of these checked exceptions.
*/
class BootstrapException extends Exception {
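A hedged sketch of the wrapping pattern the Javadoc describes, assuming the usual cause-taking constructor: a bootstrap step that throws a checked exception wraps it, so Bootstrap#init only has to declare BootstrapException.

static void exampleBootstrapStep(java.nio.file.Path pidFile) throws BootstrapException {
    try {
        java.nio.file.Files.createFile(pidFile); // any setup work that throws a checked exception
    } catch (java.io.IOException e) {
        throw new BootstrapException(e);
    }
}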
View File
@ -24,16 +24,16 @@ import org.elasticsearch.common.SuppressForbidden;
import java.util.Dictionary;
import java.util.Enumeration;
/**
 * Exposes system startup information
 */
@SuppressForbidden(reason = "exposes read-only view of system properties")
public final class BootstrapInfo {
/** no instantiation */
private BootstrapInfo() {}
/**
* Returns true if we successfully loaded native libraries.
* <p>
* If this returns false, then native operations such as locking
@ -42,19 +42,19 @@ public final class BootstrapInfo {
public static boolean isNativesAvailable() {
return Natives.JNA_AVAILABLE;
}
/**
* Returns true if we were able to lock the process's address space.
*/
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
/**
* Returns true if secure computing mode is enabled (supported systems only)
* Returns true if system call filter is installed (supported systems only)
*/
public static boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
public static boolean isSystemCallFilterInstalled() {
return Natives.isSystemCallFilterInstalled();
}
/**
View File
@ -33,8 +33,8 @@ public final class BootstrapSettings {
public static final Setting<Boolean> MEMORY_LOCK_SETTING =
Setting.boolSetting("bootstrap.memory_lock", false, Property.NodeScope);
public static final Setting<Boolean> SECCOMP_SETTING =
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
public static final Setting<Boolean> SYSTEM_CALL_FILTER_SETTING =
Setting.boolSetting("bootstrap.system_call_filter", true, Property.NodeScope);
public static final Setting<Boolean> CTRLHANDLER_SETTING =
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
View File
@ -25,9 +25,11 @@ import joptsimple.OptionSpecBuilder;
import joptsimple.util.PathConverter;
import org.elasticsearch.Build;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.NodeValidationException;
@ -40,7 +42,7 @@ import java.util.Map;
/**
* This class starts elasticsearch.
*/
class Elasticsearch extends SettingCommand {
class Elasticsearch extends EnvironmentAwareCommand {
private final OptionSpecBuilder versionOption;
private final OptionSpecBuilder daemonizeOption;
@ -90,7 +92,7 @@ class Elasticsearch extends SettingCommand {
}
@Override
protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws UserException {
protected void execute(Terminal terminal, OptionSet options, Environment env) throws UserException {
if (options.nonOptionArguments().isEmpty() == false) {
throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
}
@ -109,16 +111,16 @@ class Elasticsearch extends SettingCommand {
final boolean quiet = options.has(quietOption);
try {
init(daemonize, pidFile, quiet, settings);
init(daemonize, pidFile, quiet, env.settings());
} catch (NodeValidationException e) {
throw new UserException(ExitCodes.CONFIG, e.getMessage());
}
}
void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
void init(final boolean daemonize, final Path pidFile, final boolean quiet, Settings initialSettings)
throws NodeValidationException, UserException {
try {
Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
Bootstrap.init(!daemonize, pidFile, quiet, initialSettings);
} catch (BootstrapException | RuntimeException e) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
View File
@ -61,7 +61,7 @@ final class JNACLibrary {
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] { "rlim_cur", "rlim_max" });
return Arrays.asList("rlim_cur", "rlim_max");
}
}
View File
@ -149,7 +149,7 @@ final class JNAKernel32Library {
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[]{"BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"});
return Arrays.asList("BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type");
}
}
@ -261,10 +261,8 @@ final class JNAKernel32Library {
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] {
"PerProcessUserTimeLimit", "PerJobUserTimeLimit", "LimitFlags", "MinimumWorkingSetSize",
"MaximumWorkingSetSize", "ActiveProcessLimit", "Affinity", "PriorityClass", "SchedulingClass"
});
return Arrays.asList("PerProcessUserTimeLimit", "PerJobUserTimeLimit", "LimitFlags", "MinimumWorkingSetSize",
"MaximumWorkingSetSize", "ActiveProcessLimit", "Affinity", "PriorityClass", "SchedulingClass");
}
}
View File
@ -43,11 +43,11 @@ class JNANatives {
// Set to true, in case native mlockall call was successful
static boolean LOCAL_MLOCKALL = false;
// Set to true, in case native seccomp call was successful
static boolean LOCAL_SECCOMP = false;
// Set to true, in case native system call filter install was successful
static boolean LOCAL_SYSTEM_CALL_FILTER = false;
// Set to true, in case policy can be applied to all threads of the process (even existing ones)
// otherwise they are only inherited for new threads (ES app threads)
static boolean LOCAL_SECCOMP_ALL = false;
static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false;
// set to the maximum number of threads that can be created for
// the user ID that owns the running Elasticsearch process
static long MAX_NUMBER_OF_THREADS = -1;
@ -210,12 +210,12 @@ class JNANatives {
}
}
static void trySeccomp(Path tmpFile) {
static void tryInstallSystemCallFilter(Path tmpFile) {
try {
int ret = Seccomp.init(tmpFile);
LOCAL_SECCOMP = true;
int ret = SystemCallFilter.init(tmpFile);
LOCAL_SYSTEM_CALL_FILTER = true;
if (ret == 1) {
LOCAL_SECCOMP_ALL = true;
LOCAL_SYSTEM_CALL_FILTER_ALL = true;
}
} catch (Exception e) {
// this is likely to happen unless the kernel is newish; it's a best effort at the moment
View File
@ -33,9 +33,7 @@ public class JavaVersion implements Comparable<JavaVersion> {
}
private JavaVersion(List<Integer> version) {
if (version.size() >= 2
&& version.get(0).intValue() == 1
&& version.get(1).intValue() == 8) {
if (version.size() >= 2 && version.get(0) == 1 && version.get(1) == 8) {
// for Java 8 there is ambiguity since both 1.8 and 8 are supported,
// so we rewrite the former to the latter
version = new ArrayList<>(version.subList(1, version.size()));
View File
@ -91,12 +91,12 @@ final class Natives {
return JNANatives.LOCAL_MLOCKALL;
}
static void trySeccomp(Path tmpFile) {
static void tryInstallSystemCallFilter(Path tmpFile) {
if (!JNA_AVAILABLE) {
logger.warn("cannot install syscall filters because JNA is not available");
logger.warn("cannot install system call filter because JNA is not available");
return;
}
JNANatives.trySeccomp(tmpFile);
JNANatives.tryInstallSystemCallFilter(tmpFile);
}
static void trySetMaxNumberOfThreads() {
@ -115,10 +115,10 @@ final class Natives {
JNANatives.trySetMaxSizeVirtualMemory();
}
static boolean isSeccompInstalled() {
static boolean isSystemCallFilterInstalled() {
if (!JNA_AVAILABLE) {
return false;
}
return JNANatives.LOCAL_SECCOMP;
return JNANatives.LOCAL_SYSTEM_CALL_FILTER;
}
}
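A hedged sketch of the renamed call site in the bootstrap path: the filter is installed only when the (equally renamed) setting is enabled; the tmpFile argument and the surrounding wiring are assumptions.

if (BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings)) {
    Natives.tryInstallSystemCallFilter(tmpFile);
}
// reports false when JNA is unavailable or the install did not succeed
boolean installed = Natives.isSystemCallFilterInstalled();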
View File
@ -34,7 +34,7 @@ import java.util.List;
import java.util.Locale;
/**
* Spawns native plugin controller processes if present. Will only work prior to seccomp being set up.
* Spawns native plugin controller processes if present. Will only work prior to a system call filter being installed.
*/
final class Spawner implements Closeable {
View File
@ -104,7 +104,7 @@ final class StartupException extends RuntimeException {
continue;
}
consumer.accept("\tat " + line.toString());
consumer.accept("\tat " + line);
linesWritten++;
}
}
View File
@ -43,8 +43,7 @@ import java.util.List;
import java.util.Map;
/**
* Installs a limited form of secure computing mode,
* to filters system calls to block process execution.
* Installs a system call filter to block process execution.
* <p>
* This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
* <p>
@ -91,8 +90,8 @@ import java.util.Map;
* https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html</a>
*/
// not an example of how to write code!!!
final class Seccomp {
private static final Logger logger = Loggers.getLogger(Seccomp.class);
final class SystemCallFilter {
private static final Logger logger = Loggers.getLogger(SystemCallFilter.class);
// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering
@ -172,7 +171,7 @@ final class Seccomp {
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] { "len", "filter" });
return Arrays.asList("len", "filter");
}
}
@ -269,7 +268,8 @@ final class Seccomp {
// we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug
if (linux_libc == null) {
throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ " +
"with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
}
// pure paranoia:
@ -319,7 +319,8 @@ final class Seccomp {
switch (errno) {
case ENOSYS: break; // ok
case EINVAL: break; // ok
default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno));
default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): "
+ JNACLibrary.strerror(errno));
}
}
@ -346,7 +347,8 @@ final class Seccomp {
int errno = Native.getLastError();
if (errno == EINVAL) {
// friendly error, this will be the typical case for an old kernel
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with" +
" CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
} else {
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
}
@ -358,7 +360,8 @@ final class Seccomp {
default:
int errno = Native.getLastError();
if (errno == EINVAL) {
throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," +
" CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
} else {
throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno));
}
@ -368,7 +371,8 @@ final class Seccomp {
int errno = Native.getLastError();
switch (errno) {
case EFAULT: break; // available
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not" +
" compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
default: throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno));
}
}
@ -380,10 +384,12 @@ final class Seccomp {
// check it worked
if (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) {
throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()));
throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " +
JNACLibrary.strerror(Native.getLastError()));
}
// BPF installed to check arch, limit, then syscall. See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
// BPF installed to check arch, limit, then syscall.
// See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
SockFilter insns[] = {
/* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), //
/* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail;
@ -408,7 +414,8 @@ final class Seccomp {
method = 0;
int errno1 = Native.getLastError();
if (logger.isDebugEnabled()) {
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...",
JNACLibrary.strerror(errno1));
}
if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
int errno2 = Native.getLastError();
@ -419,7 +426,8 @@ final class Seccomp {
// now check that the filter was really installed, we should be in filter mode.
if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) {
throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + JNACLibrary.strerror(Native.getLastError()));
throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): "
+ JNACLibrary.strerror(Native.getLastError()));
}
logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app" );
View File
@ -24,17 +24,21 @@ import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.apache.logging.log4j.Level;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Arrays;
/**
* An action to execute within a cli.
*/
public abstract class Command {
public abstract class Command implements Closeable {
/** A description of the command, used in the help output. */
protected final String description;
@ -44,15 +48,37 @@ public abstract class Command {
private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp();
private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output")
.availableUnless(silentOption);
private final OptionSpec<Void> verboseOption =
parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output").availableUnless(silentOption);
public Command(String description) {
this.description = description;
}
final SetOnce<Thread> shutdownHookThread = new SetOnce<>();
/** Parses options for this command from args and executes it. */
public final int main(String[] args, Terminal terminal) throws Exception {
if (addShutdownHook()) {
shutdownHookThread.set(new Thread(() -> {
try {
this.close();
} catch (final IOException e) {
try (
final StringWriter sw = new StringWriter();
final PrintWriter pw = new PrintWriter(sw)) {
e.printStackTrace(pw);
terminal.println(sw.toString());
} catch (final IOException impossible) {
// StringWriter#close declares a checked IOException from the Closeable interface but the Javadocs for StringWriter
// say that an exception here is impossible
throw new AssertionError(impossible);
}
}
}));
Runtime.getRuntime().addShutdownHook(shutdownHookThread.get());
}
// initialize default for es.logger.level because we will not read the log4j2.properties
final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
@ -118,4 +144,19 @@ public abstract class Command {
* Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */
protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;
/**
* Return whether or not to install the shutdown hook to clean up resources on exit. This method should only be overridden in test
* classes.
*
* @return whether or not to install the shutdown hook
*/
protected boolean addShutdownHook() {
return true;
}
@Override
public void close() throws IOException {
}
}
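A minimal sketch of a command that holds a resource, assuming only the Command API shown above: the shutdown hook installed in main() calls close(), so the resource is released even if the JVM exits before execute() returns normally. The writer and file name are illustrative.

class ExampleCommand extends Command {
    private java.io.PrintWriter out; // hypothetical resource

    ExampleCommand() {
        super("writes a greeting to a file");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        out = new java.io.PrintWriter(java.nio.file.Files.newBufferedWriter(java.nio.file.Paths.get("greeting.txt")));
        out.println("hello");
    }

    @Override
    public void close() throws java.io.IOException {
        if (out != null) {
            out.close(); // invoked by the shutdown hook on abrupt exit
        }
    }
}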
View File
@ -22,16 +22,20 @@ package org.elasticsearch.cli;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
public abstract class SettingCommand extends Command {
/** A cli command which requires an {@link org.elasticsearch.env.Environment} to use current paths and settings. */
public abstract class EnvironmentAwareCommand extends Command {
private final OptionSpec<KeyValuePair> settingOption;
public SettingCommand(String description) {
public EnvironmentAwareCommand(String description) {
super(description);
this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
}
@ -51,9 +55,15 @@ public abstract class SettingCommand extends Command {
putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
execute(terminal, options, settings);
execute(terminal, options, createEnv(terminal, settings));
}
/** Create an {@link Environment} for the command to use. Overrideable for tests. */
protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings);
}
/** Ensure the given setting exists, reading it from system properties if not already set. */
protected static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
final String value = System.getProperty(key);
if (value != null) {
@ -72,6 +82,7 @@ public abstract class SettingCommand extends Command {
}
}
protected abstract void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception;
/** Execute the command with the initialized {@link Environment}. */
protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception;
}
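A hedged sketch of a command ported to the renamed base class: execute now receives a fully prepared Environment instead of a raw settings map; the printed setting is illustrative.

class ExampleEnvCommand extends EnvironmentAwareCommand {
    ExampleEnvCommand() {
        super("prints the configured cluster name");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        terminal.println("cluster.name = " + env.settings().get("cluster.name", "elasticsearch"));
    }
}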
View File
@ -147,8 +147,8 @@ public abstract class TransportClient extends AbstractClient {
modules.add(pluginModule);
}
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getClusterSettings(),
threadPool, pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
View File
@ -16,20 +16,19 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.test.ESBackcompatTestCase;
import org.elasticsearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods;
package org.elasticsearch.cluster;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import org.elasticsearch.cluster.service.ClusterService;
public class SignificantTermsBackwardCompatibilityIT extends ESBackcompatTestCase {
/**
* A component that is in charge of applying an incoming cluster state to the node internal data structures.
* The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}.
*/
public interface ClusterStateApplier {
/**
* Test for streaming significant terms buckets to old es versions.
* Called when a new cluster state ({@link ClusterChangedEvent#state()}) needs to be applied
*/
public void testAggregateAndCheckFromSeveralShards() throws IOException, ExecutionException, InterruptedException {
SharedSignificantTermsTestMethods.aggregateAndCheckFromSeveralShards(this);
}
void applyClusterState(ClusterChangedEvent event);
}
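A minimal sketch of an applier, using only the interface above plus the nodesDelta() accessor that appears in MasterNodeChangePredicate further down: it runs while the new state is being applied, i.e. before ClusterService#state() exposes it.

class ExampleApplier implements ClusterStateApplier {
    @Override
    public void applyClusterState(ClusterChangedEvent event) {
        if (event.nodesDelta().masterNodeChanged()) {
            // rebuild node-local structures that depend on the master node
        }
    }
}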
View File
@ -22,13 +22,12 @@ package org.elasticsearch.cluster;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterStateStatus;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
/**
* A utility class which simplifies interacting with the cluster state in cases where
@ -39,20 +38,14 @@ public class ClusterStateObserver {
protected final Logger logger;
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
@Override
public boolean apply(ClusterChangedEvent changedEvent) {
return changedEvent.previousState().version() != changedEvent.state().version();
}
};
private final Predicate<ClusterState> MATCH_ALL_CHANGES_PREDICATE = state -> true;
private final ClusterService clusterService;
private final ThreadContext contextHolder;
volatile TimeValue timeOutValue;
final AtomicReference<ClusterServiceState> lastObservedState;
final AtomicReference<ClusterState> lastObservedState;
final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener();
// observingContext is not null when waiting on cluster state changes
final AtomicReference<ObservingContext> observingContext = new AtomicReference<>(null);
@ -70,8 +63,17 @@ public class ClusterStateObserver {
* to wait indefinitely
*/
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
this(clusterService.state(), clusterService, timeout, logger, contextHolder);
}
/**
* @param timeout a global timeout for this observer. After it has expired the observer
* will fail any existing or new #waitForNextChange calls. Set to null
* to wait indefinitely
*/
public ClusterStateObserver(ClusterState initialState, ClusterService clusterService, @Nullable TimeValue timeout, Logger logger,
ThreadContext contextHolder) {
this.clusterService = clusterService;
this.lastObservedState = new AtomicReference<>(clusterService.clusterServiceState());
this.lastObservedState = new AtomicReference<>(initialState);
this.timeOutValue = timeout;
if (timeOutValue != null) {
this.startTimeNS = System.nanoTime();
@ -81,8 +83,8 @@ public class ClusterStateObserver {
}
/** last cluster state and status observed by this observer. Note that this may not be the current one */
public ClusterServiceState observedState() {
ClusterServiceState state = lastObservedState.get();
public ClusterState observedState() {
ClusterState state = lastObservedState.get();
assert state != null;
return state;
}
@ -100,18 +102,18 @@ public class ClusterStateObserver {
waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE, timeOutValue);
}
public void waitForNextChange(Listener listener, ChangePredicate changePredicate) {
waitForNextChange(listener, changePredicate, null);
public void waitForNextChange(Listener listener, Predicate<ClusterState> statePredicate) {
waitForNextChange(listener, statePredicate, null);
}
/**
* Wait for the next cluster state which satisfies changePredicate
* Wait for the next cluster state which satisfies statePredicate
*
* @param listener callback listener
* @param changePredicate predicate to check whether cluster state changes are relevant and the callback should be called
* @param statePredicate predicate to check whether cluster state changes are relevant and the callback should be called
* @param timeOutValue a timeout for waiting. If null the global observer timeout will be used.
*/
public void waitForNextChange(Listener listener, ChangePredicate changePredicate, @Nullable TimeValue timeOutValue) {
public void waitForNextChange(Listener listener, Predicate<ClusterState> statePredicate, @Nullable TimeValue timeOutValue) {
if (observingContext.get() != null) {
throw new ElasticsearchException("already waiting for a cluster state change");
@ -128,7 +130,7 @@ public class ClusterStateObserver {
logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
// update to latest, in case people want to retry
timedOut = true;
lastObservedState.set(clusterService.clusterServiceState());
lastObservedState.set(clusterService.state());
listener.onTimeout(timeOutValue);
return;
}
@ -143,33 +145,24 @@ public class ClusterStateObserver {
}
// sample a new state
ClusterServiceState newState = clusterService.clusterServiceState();
ClusterServiceState lastState = lastObservedState.get();
if (changePredicate.apply(lastState, newState)) {
ClusterState newState = clusterService.state();
ClusterState lastState = lastObservedState.get();
if (newState != lastState && statePredicate.test(newState)) {
// good enough, let's go.
logger.trace("observer: sampled state accepted by predicate ({})", newState);
lastObservedState.set(newState);
listener.onNewClusterState(newState.getClusterState());
listener.onNewClusterState(newState);
} else {
logger.trace("observer: sampled state rejected by predicate ({}). adding listener to ClusterService", newState);
ObservingContext context = new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), changePredicate);
ObservingContext context =
new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), statePredicate);
if (!observingContext.compareAndSet(null, context)) {
throw new ElasticsearchException("already waiting for a cluster state change");
}
clusterService.add(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener);
clusterService.addTimeoutListener(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener);
}
}
/**
* reset this observer to the give cluster state. Any pending waits will be canceled.
*/
public void reset(ClusterServiceState state) {
if (observingContext.getAndSet(null) != null) {
clusterService.remove(clusterStateListener);
}
lastObservedState.set(state);
}
class ObserverClusterStateListener implements TimeoutClusterStateListener {
@Override
@ -179,18 +172,18 @@ public class ClusterStateObserver {
// No need to remove listener as it is the responsibility of the thread that set observingContext to null
return;
}
if (context.changePredicate.apply(event)) {
final ClusterState state = event.state();
if (context.statePredicate.test(state)) {
if (observingContext.compareAndSet(context, null)) {
clusterService.remove(this);
ClusterServiceState state = new ClusterServiceState(event.state(), ClusterStateStatus.APPLIED);
clusterService.removeTimeoutListener(this);
logger.trace("observer: accepting cluster state change ({})", state);
lastObservedState.set(state);
context.listener.onNewClusterState(state.getClusterState());
context.listener.onNewClusterState(state);
} else {
logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", event.state().version());
logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version());
}
} else {
logger.trace("observer: predicate rejected change (new cluster state version [{}])", event.state().version());
logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version());
}
}
@ -201,15 +194,15 @@ public class ClusterStateObserver {
// No need to remove listener as it is the responsibility of the thread that set observingContext to null
return;
}
ClusterServiceState newState = clusterService.clusterServiceState();
ClusterServiceState lastState = lastObservedState.get();
if (context.changePredicate.apply(lastState, newState)) {
ClusterState newState = clusterService.state();
ClusterState lastState = lastObservedState.get();
if (newState != lastState && context.statePredicate.test(newState)) {
// double check we're still listening
if (observingContext.compareAndSet(context, null)) {
logger.trace("observer: post adding listener: accepting current cluster state ({})", newState);
clusterService.remove(this);
clusterService.removeTimeoutListener(this);
lastObservedState.set(newState);
context.listener.onNewClusterState(newState.getClusterState());
context.listener.onNewClusterState(newState);
} else {
logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState);
}
@ -224,7 +217,7 @@ public class ClusterStateObserver {
if (context != null) {
logger.trace("observer: cluster service closed. notifying listener.");
clusterService.remove(this);
clusterService.removeTimeoutListener(this);
context.listener.onClusterServiceClose();
}
}
@ -233,11 +226,11 @@ public class ClusterStateObserver {
public void onTimeout(TimeValue timeout) {
ObservingContext context = observingContext.getAndSet(null);
if (context != null) {
clusterService.remove(this);
clusterService.removeTimeoutListener(this);
long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
// update to latest, in case people want to retry
lastObservedState.set(clusterService.clusterServiceState());
lastObservedState.set(clusterService.state());
timedOut = true;
context.listener.onTimeout(timeOutValue);
}
@ -255,58 +248,13 @@ public class ClusterStateObserver {
void onTimeout(TimeValue timeout);
}
public interface ChangePredicate {
/**
* a rough check used when starting to monitor for a new change. Called infrequently, so it can be less accurate.
*
* @return true if newState should be accepted
*/
boolean apply(ClusterServiceState previousState,
ClusterServiceState newState);
/**
* called to see whether a cluster change should be accepted
*
* @return true if changedEvent.state() should be accepted
*/
boolean apply(ClusterChangedEvent changedEvent);
}
public abstract static class ValidationPredicate implements ChangePredicate {
@Override
public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) {
return (previousState.getClusterState() != newState.getClusterState() ||
previousState.getClusterStateStatus() != newState.getClusterStateStatus()) &&
validate(newState);
}
protected abstract boolean validate(ClusterServiceState newState);
@Override
public boolean apply(ClusterChangedEvent changedEvent) {
return changedEvent.previousState().version() != changedEvent.state().version() &&
validate(new ClusterServiceState(changedEvent.state(), ClusterStateStatus.APPLIED));
}
}
public abstract static class EventPredicate implements ChangePredicate {
@Override
public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) {
return previousState.getClusterState() != newState.getClusterState() || previousState.getClusterStateStatus() != newState.getClusterStateStatus();
}
}
static class ObservingContext {
public final Listener listener;
public final ChangePredicate changePredicate;
public final Predicate<ClusterState> statePredicate;
public ObservingContext(Listener listener, ChangePredicate changePredicate) {
public ObservingContext(Listener listener, Predicate<ClusterState> statePredicate) {
this.listener = listener;
this.changePredicate = changePredicate;
this.statePredicate = statePredicate;
}
}
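A usage sketch of the simplified API: with ChangePredicate gone, callers pass a plain Predicate<ClusterState>; the observer construction and the retry logic are assumed to live elsewhere.

observer.waitForNextChange(new ClusterStateObserver.Listener() {
    @Override
    public void onNewClusterState(ClusterState state) { /* retry the operation */ }

    @Override
    public void onClusterServiceClose() { /* node is shutting down, give up */ }

    @Override
    public void onTimeout(TimeValue timeout) { /* fail the pending request */ }
}, newState -> newState.nodes().getMasterNodeId() != null);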
View File
@ -19,11 +19,6 @@
package org.elasticsearch.cluster;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
@ -53,6 +48,11 @@ import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
* InternalClusterInfoService provides the ClusterInfoService interface,
* routinely updated on a timer. The timer can be dynamically changed by
@ -64,7 +64,8 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException;
* Every time the timer runs, gathers information about the disk usage and
* shard sizes across the cluster.
*/
public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
public class InternalClusterInfoService extends AbstractComponent
implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING =
Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10),
@ -105,9 +106,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
// Add InternalClusterInfoService to listen for Master changes
this.clusterService.add((LocalNodeMasterListener)this);
this.clusterService.addLocalNodeMasterListener(this);
// Add to listen for state changes (when nodes are added)
this.clusterService.add((ClusterStateListener)this);
this.clusterService.addListener(this);
}
private void setEnabled(boolean enabled) {
@ -167,7 +168,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}
if (this.isMaster && dataNodeAdded && clusterService.state().getNodes().getDataNodes().size() > 1) {
if (this.isMaster && dataNodeAdded && event.state().getNodes().getDataNodes().size() > 1) {
if (logger.isDebugEnabled()) {
logger.debug("data node was added, retrieving new cluster info");
}
View File
@ -19,23 +19,32 @@
package org.elasticsearch.cluster;
import org.elasticsearch.cluster.service.ClusterServiceState;
import java.util.function.Predicate;
public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate {
INSTANCE;
public final class MasterNodeChangePredicate {
private MasterNodeChangePredicate() {
@Override
public boolean apply(
ClusterServiceState previousState,
ClusterServiceState newState) {
// checking if the masterNodeId changed is insufficient as the
// same master node might get re-elected after a disruption
return newState.getClusterState().nodes().getMasterNodeId() != null &&
newState.getClusterState() != previousState.getClusterState();
}
@Override
public boolean apply(ClusterChangedEvent changedEvent) {
return changedEvent.nodesDelta().masterNodeChanged();
/**
* builds a predicate that will accept a cluster state only if it was generated after the current
* state, i.e. a new master was elected or the same master published a newer version
*/
public static Predicate<ClusterState> build(ClusterState currentState) {
final long currentVersion = currentState.version();
final String currentMaster = currentState.nodes().getMasterNodeId();
return newState -> {
final String newMaster = newState.nodes().getMasterNodeId();
final boolean accept;
if (newMaster == null) {
accept = false;
} else if (newMaster.equals(currentMaster) == false) {
accept = true;
} else {
accept = newState.version() > currentVersion;
}
return accept;
};
}
}
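A worked example of the decision logic above, for a current state at version 10 whose master is node "A":

//   new state with no master          -> rejected
//   new state with master "B"         -> accepted (different master)
//   new state from "A" at version 10  -> rejected (not newer)
//   new state from "A" at version 11  -> accepted (newer publication)
Predicate<ClusterState> predicate = MasterNodeChangePredicate.build(currentState);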
View File
@ -92,7 +92,7 @@ public class ShardStateAction extends AbstractComponent {
}
private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) {
DiscoveryNode masterNode = observer.observedState().getClusterState().nodes().getMasterNode();
DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode();
if (masterNode == null) {
logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry);
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
@ -142,18 +142,27 @@ public class ShardStateAction extends AbstractComponent {
*/
public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
assert primaryTerm > 0L : "primary term should be strictly positive";
shardFailed(shardId, allocationId, primaryTerm, message, failure, listener);
shardFailed(shardId, allocationId, primaryTerm, message, failure, listener, clusterService.state());
}
/**
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
*/
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) {
shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener);
localShardFailed(shardRouting, message, failure, listener, clusterService.state());
}
private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
/**
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
*/
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener,
final ClusterState currentState) {
shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener, currentState);
}
private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message,
@Nullable final Exception failure, Listener listener, ClusterState currentState) {
ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
}
@ -180,7 +189,7 @@ public class ShardStateAction extends AbstractComponent {
// we wait indefinitely for a new master
assert false;
}
}, MasterNodeChangePredicate.INSTANCE);
}, MasterNodeChangePredicate.build(observer.observedState()));
}
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
@ -342,7 +351,10 @@ public class ShardStateAction extends AbstractComponent {
}
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
shardStarted(shardRouting, message, listener, clusterService.state());
}
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener, ClusterState currentState) {
ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null);
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener);
}
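A usage sketch of the new overloads: a caller that already holds a sampled state passes it along so the ClusterStateObserver starts from exactly that state instead of re-sampling the ClusterService; the surrounding objects are assumptions.

ClusterState sampledState = clusterService.state();
shardStateAction.localShardFailed(shardRouting, "example failure", cause, listener, sampledState);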
View File
@ -33,7 +33,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.InvalidAliasNameException;
import java.io.IOException;
import java.util.Optional;
import java.util.function.Function;
/**
@ -139,10 +138,8 @@ public class AliasValidator extends AbstractComponent {
private static void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
QueryParseContext queryParseContext = queryShardContext.newParseContext(parser);
Optional<QueryBuilder> parseInnerQueryBuilder = queryParseContext.parseInnerQueryBuilder();
if (parseInnerQueryBuilder.isPresent()) {
QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(parseInnerQueryBuilder.get(), queryShardContext);
queryBuilder.toFilter(queryShardContext);
}
QueryBuilder parseInnerQueryBuilder = queryParseContext.parseInnerQueryBuilder();
QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(parseInnerQueryBuilder, queryShardContext);
queryBuilder.toFilter(queryShardContext);
}
}
View File
@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -43,7 +44,6 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
/**
* A collection of tombstones for explicitly marking indices as deleted in the cluster state.
@ -367,7 +367,7 @@ public final class IndexGraveyard implements MetaData.Custom {
TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY));
}
static BiFunction<XContentParser, ParseFieldMatcherSupplier, Tombstone> getParser() {
static ContextParser<ParseFieldMatcherSupplier, Tombstone> getParser() {
return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build();
}
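A hedged sketch of why this signature change is source-compatible for lambda callers: the same (parser, context) -> value shape satisfies ContextParser, whose single method is assumed here to be parse(XContentParser, Context).

ContextParser<ParseFieldMatcherSupplier, IndexGraveyard.Tombstone> parser = IndexGraveyard.getParser();
IndexGraveyard.Tombstone tombstone = parser.parse(xContentParser, matcherSupplier); // both arguments assumed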
Some files were not shown because too many files have changed in this diff.