Merge branch 'master' into feature/rank-eval

Isabel Drost-Fromm 2016-12-13 10:45:22 +01:00
commit b2e8455745
458 changed files with 6480 additions and 7207 deletions


@@ -161,10 +161,20 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
if (false == test.continued) {
current.println('---')
current.println("\"line_$test.start\":")
/* The Elasticsearch test runner doesn't support the warnings
* construct unless you output this skip. Since we don't know
* if this snippet will use the warnings construct we emit this
* skip every time. */
current.println(" - skip:")
current.println(" features: ")
current.println(" - warnings")
}
if (test.skipTest) {
current.println(" - skip:")
current.println(" features: always_skip")
if (test.continued) {
throw new InvalidUserDataException("Continued snippets "
+ "can't be skipped")
}
current.println(" - always_skip")
current.println(" reason: $test.skipTest")
}
if (test.setup != null) {
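For orientation, a hedged sketch of the preamble the println calls above produce for a snippet starting at line 42 (exact indentation is not recoverable from this view, so it is illustrative):

---
"line_42":
  - skip:
      features:
        - warnings

When skipTest is set, the always_skip feature and the configured reason are emitted in addition.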


@@ -59,11 +59,16 @@ public class ForbiddenPatternsTask extends DefaultTask {
filesFilter.exclude('**/*.png')
// add mandatory rules
patterns.put('nocommit', /nocommit/)
patterns.put('nocommit', /nocommit|NOCOMMIT/)
patterns.put('nocommit should be all lowercase or all uppercase',
/((?i)nocommit)(?<!(nocommit|NOCOMMIT))/)
patterns.put('tab', /\t/)
if (System.getProperty('build.snapshot', 'true').equals('false')) {
patterns.put('norelease', /norelease/)
patterns.put('norelease', /norelease|NORELEASE/)
}
patterns.put('norelease should be all lowercase or all uppercase',
/((?i)norelease)(?<!(norelease|NORELEASE))/)
inputs.property("excludes", filesFilter.excludes)
inputs.property("rules", patterns)


@@ -37,6 +37,7 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.StringJoiner;
/**
* Helper class that exposes static methods to unify the way requests are logged.
@@ -59,6 +60,12 @@ final class RequestLogger {
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
"] returned [" + httpResponse.getStatusLine() + "]");
}
if (logger.isWarnEnabled()) {
Header[] warnings = httpResponse.getHeaders("Warning");
if (warnings != null && warnings.length > 0) {
logger.warn(buildWarningMessage(request, host, warnings));
}
}
if (tracer.isTraceEnabled()) {
String requestLine;
try {
@@ -97,6 +104,18 @@ final class RequestLogger {
}
}
static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[] warnings) {
StringBuilder message = new StringBuilder("request [").append(request.getMethod()).append(" ").append(host)
.append(getUri(request.getRequestLine())).append("] returned ").append(warnings.length).append(" warnings: ");
for (int i = 0; i < warnings.length; i++) {
if (i > 0) {
message.append(",");
}
message.append("[").append(warnings[i].getValue()).append("]");
}
return message.toString();
}
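// For illustration (values hypothetical): a response carrying two Warning
// headers would be logged by the method above as
//   request [GET http://localhost:9200/index/type/_api] returned 2 warnings: [first warning],[second warning]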
/**
* Creates curl output for given request
*/


@@ -23,6 +23,10 @@ package org.elasticsearch.client;
* Listener to be provided when calling async performRequest methods provided by {@link RestClient}.
* Those methods that do accept a listener will return immediately, execute asynchronously, and notify
* the listener whenever the request yielded a response, or failed with an exception.
*
* <p>
* Note that it is <strong>not</strong> safe to call {@link RestClient#close()} from either of these
* callbacks.
*/
public interface ResponseListener {
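A minimal usage sketch of the contract documented above, assuming the 5.x async signature performRequestAsync(method, endpoint, params, listener):

restClient.performRequestAsync("GET", "/", Collections.<String, String>emptyMap(),
        new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                // consume the response; calling restClient.close() here is not safe
            }

            @Override
            public void onFailure(Exception exception) {
                // handle the failure; closing the client here is equally unsafe
            }
        });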


@@ -19,7 +19,7 @@
package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
@@ -29,10 +29,11 @@ import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NByteArrayEntity;
@@ -46,13 +47,13 @@ import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
public class RequestLoggerTests extends RestClientTestCase {
public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https");
String expectedEndpoint = "/index/type/_api";
URI uri;
if (randomBoolean()) {
@@ -60,46 +61,15 @@ public class RequestLoggerTests extends RestClientTestCase {
} else {
uri = new URI("index/type/_api");
}
HttpRequestBase request;
int requestType = RandomNumbers.randomIntBetween(getRandom(), 0, 7);
switch(requestType) {
case 0:
request = new HttpGetWithEntity(uri);
break;
case 1:
request = new HttpPost(uri);
break;
case 2:
request = new HttpPut(uri);
break;
case 3:
request = new HttpDeleteWithEntity(uri);
break;
case 4:
request = new HttpHead(uri);
break;
case 5:
request = new HttpTrace(uri);
break;
case 6:
request = new HttpOptions(uri);
break;
case 7:
request = new HttpPatch(uri);
break;
default:
throw new UnsupportedOperationException();
}
HttpUriRequest request = randomHttpRequest(uri);
String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean();
String requestBody = "{ \"field\": \"value\" }";
if (hasBody) {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
switch(randomIntBetween(0, 3)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
break;
@@ -128,12 +98,12 @@ public class RequestLoggerTests extends RestClientTestCase {
public void testTraceResponse() throws IOException {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
int statusCode = RandomNumbers.randomIntBetween(getRandom(), 200, 599);
int statusCode = randomIntBetween(200, 599);
String reasonPhrase = "REASON";
BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
String expected = "# " + statusLine.toString();
BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
int numHeaders = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
int numHeaders = randomIntBetween(0, 3);
for (int i = 0; i < numHeaders; i++) {
httpResponse.setHeader("header" + i, "value");
expected += "\n# header" + i + ": value";
@@ -162,4 +132,46 @@ public class RequestLoggerTests extends RestClientTestCase {
assertThat(body, equalTo(responseBody));
}
}
public void testResponseWarnings() throws Exception {
HttpHost host = new HttpHost("localhost", 9200);
HttpUriRequest request = randomHttpRequest(new URI("/index/type/_api"));
int numWarnings = randomIntBetween(1, 5);
StringBuilder expected = new StringBuilder("request [").append(request.getMethod()).append(" ").append(host)
.append("/index/type/_api] returned ").append(numWarnings).append(" warnings: ");
Header[] warnings = new Header[numWarnings];
for (int i = 0; i < numWarnings; i++) {
String warning = "this is warning number " + i;
warnings[i] = new BasicHeader("Warning", warning);
if (i > 0) {
expected.append(",");
}
expected.append("[").append(warning).append("]");
}
assertEquals(expected.toString(), RequestLogger.buildWarningMessage(request, host, warnings));
}
private static HttpUriRequest randomHttpRequest(URI uri) {
int requestType = randomIntBetween(0, 7);
switch(requestType) {
case 0:
return new HttpGetWithEntity(uri);
case 1:
return new HttpPost(uri);
case 2:
return new HttpPut(uri);
case 3:
return new HttpDeleteWithEntity(uri);
case 4:
return new HttpHead(uri);
case 5:
return new HttpTrace(uri);
case 6:
return new HttpOptions(uri);
case 7:
return new HttpPatch(uri);
default:
throw new UnsupportedOperationException();
}
}
}


@@ -35,12 +35,6 @@ public class Version {
* values below 25 are for alpha builds (since 5.0), values above 25 and below 50 are beta builds, and values below 99 are RC builds, with 99
* indicating a release; the (internal) format of the id is there so we can easily do after/before checks on the id
*/
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_beta2_ID = 2000002;
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_rc1_ID = 2000051;
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_ID = 2000099;
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_1_ID = 2000199;
@@ -102,6 +96,8 @@ public class Version {
// no version constant for 5.1.0 due to inadvertent release
public static final int V_5_1_1_ID_UNRELEASED = 5010199;
public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_5_1_2_ID_UNRELEASED = 5010299;
public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_5_2_0_ID_UNRELEASED = 5020099;
public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
@@ -126,6 +122,8 @@ public class Version {
return V_6_0_0_alpha1_UNRELEASED;
case V_5_2_0_ID_UNRELEASED:
return V_5_2_0_UNRELEASED;
case V_5_1_2_ID_UNRELEASED:
return V_5_1_2_UNRELEASED;
case V_5_1_1_ID_UNRELEASED:
return V_5_1_1_UNRELEASED;
case V_5_0_3_ID_UNRELEASED:
@@ -186,12 +184,6 @@ public class Version {
return V_2_0_1;
case V_2_0_0_ID:
return V_2_0_0;
case V_2_0_0_rc1_ID:
return V_2_0_0_rc1;
case V_2_0_0_beta2_ID:
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
default:
return new Version(id, org.apache.lucene.util.Version.LATEST);
}
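The version id packs major/minor/revision/build into decimal fields, which is what makes the numeric after/before checks above work. A sketch of the decoding, using V_5_1_2_ID_UNRELEASED from this diff:

int id = 5010299;                  // V_5_1_2_ID_UNRELEASED
int build = id % 100;              // 99 -> release (per the comment: <25 alpha, <50 beta, <99 RC)
int revision = (id / 100) % 100;   // 2
int minor = (id / 10000) % 100;    // 1
int major = id / 1000000;          // 5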


@@ -19,20 +19,14 @@
package org.elasticsearch.action;
import java.io.IOException;
import org.elasticsearch.common.CheckedConsumer;
import java.util.function.Consumer;
/**
* A listener for action responses or failures.
*/
public interface ActionListener<Response> {
/** A consumer interface which allows throwing checked exceptions. */
@FunctionalInterface
interface CheckedConsumer<T> {
void accept(T t) throws Exception;
}
/**
* Handle action response. This response may constitute a failure or a
* success but it is up to the listener to make that decision.
@@ -53,7 +47,8 @@ public interface ActionListener<Response> {
* @param <Response> the type of the response
* @return a listener that listens for responses and invokes the consumer when received
*/
static <Response> ActionListener<Response> wrap(CheckedConsumer<Response> onResponse, Consumer<Exception> onFailure) {
static <Response> ActionListener<Response> wrap(CheckedConsumer<Response, ? extends Exception> onResponse,
Consumer<Exception> onFailure) {
return new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
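With the widened bound above, the response consumer may throw any checked exception, which wrap funnels into the failure consumer. A small sketch (parse and logger are placeholder names):

ActionListener<Response> listener = ActionListener.wrap(
        response -> parse(response),             // may now throw e.g. IOException
        e -> logger.error("request failed", e));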


@@ -105,7 +105,15 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
DiscoveryNode node = clusterService.state().nodes().get(request.getTaskId().getNodeId());
if (node == null) {
// Node is no longer part of the cluster! Try and look the task up from the results index.
getFinishedTaskFromIndex(thisTask, request, listener);
getFinishedTaskFromIndex(thisTask, request, ActionListener.wrap(listener::onResponse, e -> {
if (e instanceof ResourceNotFoundException) {
e = new ResourceNotFoundException(
"task [" + request.getTaskId() + "] belongs to the node [" + request.getTaskId().getNodeId()
+ "] which isn't part of the cluster and there is no record of the task",
e);
}
listener.onFailure(e);
}));
return;
}
GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
@@ -231,7 +239,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
*/
void onGetFinishedTaskFromIndex(GetResponse response, ActionListener<GetTaskResponse> listener) throws IOException {
if (false == response.isExists()) {
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", response.getId()));
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", response.getId()));
return;
}
if (response.isSourceEmpty()) {


@@ -48,9 +48,10 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@@ -73,6 +74,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
* the one with the least casts.
*/
final List<DocWriteRequest> requests = new ArrayList<>();
private final Set<String> indices = new HashSet<>();
List<Object> payloads = null;
protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@@ -114,6 +116,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
} else {
throw new IllegalArgumentException("No support for request [" + request + "]");
}
indices.add(request.index());
return this;
}
@@ -145,6 +148,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
addPayload(payload);
// lack of source is validated in validate() method
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
@@ -172,6 +176,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
if (request.script() != null) {
sizeInBytes += request.script().getIdOrCode().length() * 2;
}
indices.add(request.index());
return this;
}
@@ -187,6 +192,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
requests.add(request);
addPayload(payload);
sizeInBytes += REQUEST_OVERHEAD;
indices.add(request.index());
return this;
}
@@ -548,4 +554,10 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
refreshPolicy.writeTo(out);
timeout.writeTo(out);
}
@Override
public String getDescription() {
return "requests[" + requests.size() + "], indices[" + Strings.collectionToDelimitedString(indices, ", ") + "]";
}
}
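With the indices set maintained above, a bulk request holding three operations that touch idx1 and idx2 now describes itself as (values illustrative):

requests[3], indices[idx1, idx2]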


@@ -100,6 +100,11 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
return b.toString();
}
@Override
public String getDescription() {
return "requests[" + items.length + "], index[" + index + "]";
}
@Override
public void onRetry() {
for (BulkItemRequest item : items) {


@@ -156,8 +156,8 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
}
public static Engine.DeleteResult executeDeleteRequestOnReplica(DeleteRequest request, IndexShard replica) {
final Engine.Delete delete =
replica.prepareDeleteOnReplica(request.type(), request.id(), request.seqNo(), request.version(), request.versionType());
final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
request.seqNo(), request.primaryTerm(), request.version(), request.versionType());
return replica.delete(delete);
}


@@ -24,10 +24,8 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -74,41 +72,39 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
this.indexConstraints = indexConstraints;
}
public void source(BytesReference content) throws IOException {
public void source(XContentParser parser) throws IOException {
List<IndexConstraint> indexConstraints = new ArrayList<>();
List<String> fields = new ArrayList<>();
try (XContentParser parser = XContentHelper.createParser(content)) {
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.START_OBJECT;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
break;
case START_OBJECT:
if ("index_constraints".equals(fieldName)) {
parseIndexConstraints(indexConstraints, parser);
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
case START_ARRAY:
if ("fields".equals(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue()) {
fields.add(parser.text());
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
String fieldName = null;
Token token = parser.nextToken();
assert token == Token.START_OBJECT;
for (token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
switch (token) {
case FIELD_NAME:
fieldName = parser.currentName();
break;
case START_OBJECT:
if ("index_constraints".equals(fieldName)) {
parseIndexConstraints(indexConstraints, parser);
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
case START_ARRAY:
if ("fields".equals(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue()) {
fields.add(parser.text());
} else {
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
default:
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
} else {
throw new IllegalArgumentException("unknown field [" + fieldName + "]");
}
break;
default:
throw new IllegalArgumentException("unexpected token [" + token + "]");
}
}
this.fields = fields.toArray(new String[fields.size()]);


@@ -30,13 +30,10 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
@@ -317,32 +314,19 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
return this;
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, data, true);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, BytesReference data, boolean allowExplicitIndex) throws Exception {
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, null, data, allowExplicitIndex);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, BytesReference data, boolean allowExplicitIndex) throws IOException {
try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex);
} else if ("ids".equals(currentFieldName)) {
parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting);
}
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields,
@Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, XContentParser parser,
boolean allowExplicitIndex) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex);
} else if ("ids".equals(currentFieldName)) {
parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting);
}
}
}
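Callers that previously handed over raw bytes now create the parser themselves and pass it in; a sketch of the adapted call site, based on the overloads removed above:

try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
    multiGetRequest.add(defaultIndex, defaultType, defaultFields, defaultFetchSource,
            defaultRouting, parser, allowExplicitIndex);
}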


@@ -140,9 +140,15 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
validationException = addValidationError("source is missing", validationException);
}
final long resolvedVersion = resolveVersionDefaults();
if (opType() == OpType.CREATE) {
if (versionType != VersionType.INTERNAL || version != Versions.MATCH_DELETED) {
validationException = addValidationError("create operations do not support versioning. use index instead", validationException);
if (versionType != VersionType.INTERNAL) {
validationException = addValidationError("create operations only support internal versioning. use index instead", validationException);
return validationException;
}
if (resolvedVersion != Versions.MATCH_DELETED) {
validationException = addValidationError("create operations do not support explicit versions. use index instead", validationException);
return validationException;
}
}
@@ -151,8 +157,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
addValidationError("an id is required for a " + opType() + " operation", validationException);
}
if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
if (!versionType.validateVersionForWrites(resolvedVersion)) {
validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" + versionType.name() + "]", validationException);
}
if (versionType == VersionType.FORCE) {
@@ -164,7 +170,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
id.getBytes(StandardCharsets.UTF_8).length, validationException);
}
if (id == null && (versionType == VersionType.INTERNAL && version == Versions.MATCH_ANY) == false) {
if (id == null && (versionType == VersionType.INTERNAL && resolvedVersion == Versions.MATCH_ANY) == false) {
validationException = addValidationError("an id must be provided if version type or value are set", validationException);
}
@@ -387,10 +393,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
throw new IllegalArgumentException("opType must be 'create' or 'index', found: [" + opType + "]");
}
this.opType = opType;
if (opType == OpType.CREATE) {
version(Versions.MATCH_DELETED);
versionType(VersionType.INTERNAL);
}
return this;
}
@@ -433,9 +435,24 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
return this;
}
/**
* Returns stored version. If currently stored version is {@link Versions#MATCH_ANY} and
* opType is {@link OpType#CREATE}, returns {@link Versions#MATCH_DELETED}.
*/
@Override
public long version() {
return this.version;
return resolveVersionDefaults();
}
/**
* Resolves the version based on operation type {@link #opType()}.
*/
private long resolveVersionDefaults() {
if (opType == OpType.CREATE && version == Versions.MATCH_ANY) {
return Versions.MATCH_DELETED;
} else {
return version;
}
}
@Override
@@ -512,7 +529,12 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
}
out.writeBytesReference(source);
out.writeByte(opType.getId());
out.writeLong(version);
// ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
if (out.getVersion().before(Version.V_5_1_2_UNRELEASED)) {
out.writeLong(resolveVersionDefaults());
} else {
out.writeLong(version);
}
out.writeByte(versionType.getValue());
out.writeOptionalString(pipeline);
out.writeBoolean(isRetry);
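The net effect of the lazy resolution above, sketched with the names from this diff: setting the create opType no longer eagerly rewrites the stored version, but version() still reports create semantics.

IndexRequest request = new IndexRequest("index", "type", "1");
request.opType(OpType.CREATE);                         // stored version stays Versions.MATCH_ANY
assert request.version() == Versions.MATCH_DELETED;    // resolved on read via resolveVersionDefaults()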


@@ -263,4 +263,3 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
}
}


@@ -26,10 +26,8 @@ import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
@@ -88,43 +86,41 @@ public class MultiTermVectorsRequest extends ActionRequest implements Iterable<T
return requests;
}
public void add(TermVectorsRequest template, BytesReference data) throws IOException {
public void add(TermVectorsRequest template, @Nullable XContentParser parser) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
if (data.length() > 0) {
try (XContentParser parser = XContentFactory.xContent(data).createParser(data)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("docs array element should include an object");
}
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
TermVectorsRequest.parseRequest(termVectorsRequest, parser);
add(termVectorsRequest);
if (parser != null) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if ("docs".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("docs array element should include an object");
}
} else if ("ids".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
ids.add(parser.text());
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
TermVectorsRequest.parseRequest(termVectorsRequest, parser);
add(termVectorsRequest);
}
} else if ("ids".equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type ARRAY", currentFieldName);
ids.add(parser.text());
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if ("parameters".equals(currentFieldName)) {
TermVectorsRequest.parseRequest(template, parser);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type OBJECT", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ElasticsearchParseException("_mtermvectors: Parameter [{}] not supported", currentFieldName);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type ARRAY", currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if ("parameters".equals(currentFieldName)) {
TermVectorsRequest.parseRequest(template, parser);
} else {
throw new ElasticsearchParseException("no parameter named [{}] and type OBJECT", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ElasticsearchParseException("_mtermvectors: Parameter [{}] not supported", currentFieldName);
}
}
}


@@ -55,6 +55,7 @@ import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -215,8 +216,8 @@
@Override
protected void validateNodeBeforeAcceptingRequests(
final Settings settings,
final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
BootstrapCheck.check(settings, boundTransportAddress);
final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> checks) throws NodeValidationException {
BootstrapChecks.check(settings, boundTransportAddress, checks);
}
};
}


@@ -19,612 +19,27 @@
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is
* running in production and all bootstrap checks must pass.
* Encapsulates a bootstrap check.
*/
final class BootstrapCheck {
private BootstrapCheck() {
}
public interface BootstrapCheck {
/**
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface.
* Test if the node fails the check.
*
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
* @return {@code true} if the node failed the check
*/
static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
check(
enforceLimits(boundTransportAddress),
checks(settings),
Node.NODE_NAME_SETTING.get(settings));
}
boolean check();
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
* The error message for a failed check.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
* @return the error message on check failure
*/
static void check(
final boolean enforceLimits,
final List<Check> checks,
final String nodeName) throws NodeValidationException {
check(enforceLimits, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param logger the logger to log to
*/
static void check(
final boolean enforceLimits,
final List<Check> checks,
final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
for (final Check check : checks) {
if (check.check()) {
if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
}
}
}
if (!ignoredErrors.isEmpty()) {
ignoredErrors.forEach(error -> log(logger, error));
}
if (!errors.isEmpty()) {
final List<String> messages = new ArrayList<>(1 + errors.size());
messages.add("bootstrap checks failed");
messages.addAll(errors);
final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
throw ne;
}
}
static void log(final Logger logger, final String error) {
logger.warn(error);
}
/**
* Tests if the checks should be enforced.
*
* @param boundTransportAddress the node network bindings
* @return {@code true} if the checks should be enforced
*/
static boolean enforceLimits(BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress()
|| t.address().getAddress().isLoopbackAddress();
return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
}
// the list of checks to execute
static List<Check> checks(final Settings settings) {
final List<Check> checks = new ArrayList<>();
checks.add(new HeapSizeCheck());
final FileDescriptorCheck fileDescriptorCheck
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
if (Constants.LINUX) {
checks.add(new MaxMapCountCheck());
}
checks.add(new ClientJvmCheck());
checks.add(new UseSerialGCCheck());
checks.add(new SystemCallFilterCheck(BootstrapSettings.SECCOMP_SETTING.get(settings)));
checks.add(new OnErrorCheck());
checks.add(new OnOutOfMemoryErrorCheck());
checks.add(new G1GCCheck());
return Collections.unmodifiableList(checks);
}
/**
* Encapsulates a bootstrap check.
*/
interface Check {
/**
* Test if the node fails the check.
*
* @return {@code true} if the node failed the check
*/
boolean check();
/**
* The error message for a failed check.
*
* @return the error message on check failure
*/
String errorMessage();
default boolean alwaysEnforce() {
return false;
}
}
static class HeapSizeCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
final long initialHeapSize = getInitialHeapSize();
final long maxHeapSize = getMaxHeapSize();
return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"initial heap size [%d] not equal to maximum heap size [%d]; " +
"this can cause resize pauses and prevents mlockall from locking the entire heap",
getInitialHeapSize(),
getMaxHeapSize()
);
}
// visible for testing
long getInitialHeapSize() {
return JvmInfo.jvmInfo().getConfiguredInitialHeapSize();
}
// visible for testing
long getMaxHeapSize() {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}
}
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
public OsXFileDescriptorCheck() {
// see constant OPEN_MAX defined in
// /usr/include/sys/syslimits.h on OS X and its use in JVM
// initialization in int os:init_2(void) defined in the JVM
// code for BSD (contains OS X)
super(10240);
}
}
static class FileDescriptorCheck implements Check {
private final int limit;
FileDescriptorCheck() {
this(1 << 16);
}
protected FileDescriptorCheck(final int limit) {
if (limit <= 0) {
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
}
this.limit = limit;
}
public final boolean check() {
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
}
@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}
// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}
}
static class MlockallCheck implements Check {
private final boolean mlockallSet;
public MlockallCheck(final boolean mlockAllSet) {
this.mlockallSet = mlockAllSet;
}
@Override
public boolean check() {
return mlockallSet && !isMemoryLocked();
}
@Override
public String errorMessage() {
return "memory locking requested for elasticsearch process but memory is not locked";
}
// visible for testing
boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
static class MaxNumberOfThreadsCheck implements Check {
// this should be plenty for machines up to 256 cores
private final long maxNumberOfThreadsThreshold = 1 << 12;
@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] is too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}
// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
}
static class MaxSizeVirtualMemoryCheck implements Check {
@Override
public boolean check() {
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]",
getMaxSizeVirtualMemory(),
BootstrapInfo.getSystemProperties().get("user.name"));
}
// visible for testing
long getRlimInfinity() {
return JNACLibrary.RLIM_INFINITY;
}
// visible for testing
long getMaxSizeVirtualMemory() {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}
}
static class MaxMapCountCheck implements Check {
private final long limit = 1 << 18;
@Override
public boolean check() {
return getMaxMapCount() != -1 && getMaxMapCount() < limit;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
getMaxMapCount(),
limit);
}
// visible for testing
long getMaxMapCount() {
return getMaxMapCount(Loggers.getLogger(BootstrapCheck.class));
}
// visible for testing
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
if (rawProcSysVmMaxMapCount != null) {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}
// visible for testing
BufferedReader getBufferedReader(final Path path) throws IOException {
return Files.newBufferedReader(path);
}
// visible for testing
String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}
// visible for testing
long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}
}
static class ClientJvmCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return getVmName().toLowerCase(Locale.ROOT).contains("client");
}
// visible for testing
String getVmName() {
return JvmInfo.jvmInfo().getVmName();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the client VM [%s] but should be using a server VM for the best performance",
getVmName());
}
}
/**
* Checks if the serial collector is in use. This collector is single-threaded and devastating
* for performance and should not be used for a server application like Elasticsearch.
*/
static class UseSerialGCCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return getUseSerialGC().equals("true");
}
// visible for testing
String getUseSerialGC() {
return JvmInfo.jvmInfo().useSerialGC();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the serial collector but should not be for the best performance; " +
"either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified",
JvmInfo.jvmInfo().getVmName());
}
}
/**
* Bootstrap check that if system call filters are enabled, then system call filters must have installed successfully.
*/
static class SystemCallFilterCheck implements BootstrapCheck.Check {
private final boolean areSystemCallFiltersEnabled;
SystemCallFilterCheck(final boolean areSystemCallFiltersEnabled) {
this.areSystemCallFiltersEnabled = areSystemCallFiltersEnabled;
}
@Override
public boolean check() {
return areSystemCallFiltersEnabled && !isSeccompInstalled();
}
// visible for testing
boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
@Override
public String errorMessage() {
return "system call filters failed to install; " +
"check the logs and fix your configuration or disable system call filters at your own risk";
}
}
abstract static class MightForkCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
return isSeccompInstalled() && mightFork();
}
// visible for testing
boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
// visible for testing
abstract boolean mightFork();
@Override
public final boolean alwaysEnforce() {
return true;
}
}
static class OnErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onError = onError();
return onError != null && !onError.equals("");
}
// visible for testing
String onError() {
return JvmInfo.jvmInfo().onError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
static class OnOutOfMemoryErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onOutOfMemoryError = onOutOfMemoryError();
return onOutOfMemoryError != null && !onOutOfMemoryError.equals("");
}
// visible for testing
String onOutOfMemoryError() {
return JvmInfo.jvmInfo().onOutOfMemoryError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onOutOfMemoryError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
/**
* Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
*/
static class G1GCCheck implements BootstrapCheck.Check {
@Override
public boolean check() {
if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) {
final String jvmVersion = jvmVersion();
// HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223
final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
final Matcher matcher = pattern.matcher(jvmVersion);
final boolean matches = matcher.matches();
assert matches : jvmVersion;
final int major = Integer.parseInt(matcher.group(1));
final int update = Integer.parseInt(matcher.group(2));
// HotSpot versions for Java 8 have major version 25, the bad versions are all versions prior to update 40
return major == 25 && update < 40;
} else {
return false;
}
}
// visible for testing
String jvmVendor() {
return Constants.JVM_VENDOR;
}
// visible for testing
boolean isG1GCEnabled() {
assert "Oracle Corporation".equals(jvmVendor());
return JvmInfo.jvmInfo().useG1GC().equals("true");
}
// visible for testing
String jvmVersion() {
assert "Oracle Corporation".equals(jvmVendor());
return Constants.JVM_VERSION;
}
// visible for testing
boolean isJava8() {
assert "Oracle Corporation".equals(jvmVendor());
return JavaVersion.current().equals(JavaVersion.parse("1.8"));
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion());
}
String errorMessage();
default boolean alwaysEnforce() {
return false;
}
}
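Since BootstrapCheck is now a public interface rather than a utility class, additional checks can be supplied from the outside (see the List<BootstrapCheck> parameter threaded through Bootstrap above). A minimal hypothetical implementation:

static class ExampleHeapCheck implements BootstrapCheck {

    @Override
    public boolean check() {
        // fail when the configured max heap is tiny; the threshold is illustrative only
        return Runtime.getRuntime().maxMemory() < 256L * 1024 * 1024;
    }

    @Override
    public String errorMessage() {
        return "max heap is smaller than 256 MB; increase -Xmx";
    }
}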


@@ -0,0 +1,609 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface. In this case we assume the node is
* running in production and all bootstrap checks must pass.
*/
final class BootstrapChecks {
private BootstrapChecks() {
}
/**
* Executes the bootstrap checks if the node has the transport protocol bound to a non-loopback interface.
*
* @param settings the current node settings
* @param boundTransportAddress the node network bindings
*/
static void check(final Settings settings, final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> additionalChecks)
throws NodeValidationException {
final List<BootstrapCheck> builtInChecks = checks(settings);
final List<BootstrapCheck> combinedChecks = new ArrayList<>(builtInChecks);
combinedChecks.addAll(additionalChecks);
check(
enforceLimits(boundTransportAddress),
Collections.unmodifiableList(combinedChecks),
Node.NODE_NAME_SETTING.get(settings));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param nodeName the node name to be used as a logging prefix
*/
static void check(
final boolean enforceLimits,
final List<BootstrapCheck> checks,
final String nodeName) throws NodeValidationException {
check(enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class, nodeName));
}
/**
* Executes the provided checks and fails the node if {@code enforceLimits} is {@code true}, otherwise logs warnings.
*
* @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
* @param checks the checks to execute
* @param logger the logger to log to
*/
static void check(
final boolean enforceLimits,
final List<BootstrapCheck> checks,
final Logger logger) throws NodeValidationException {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
for (final BootstrapCheck check : checks) {
if (check.check()) {
if (!enforceLimits && !check.alwaysEnforce()) {
ignoredErrors.add(check.errorMessage());
} else {
errors.add(check.errorMessage());
}
}
}
if (!ignoredErrors.isEmpty()) {
ignoredErrors.forEach(error -> log(logger, error));
}
if (!errors.isEmpty()) {
final List<String> messages = new ArrayList<>(1 + errors.size());
messages.add("bootstrap checks failed");
messages.addAll(errors);
final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
throw ne;
}
}
static void log(final Logger logger, final String error) {
logger.warn(error);
}
/**
* Tests if the checks should be enforced.
*
* @param boundTransportAddress the node network bindings
* @return {@code true} if the checks should be enforced
*/
static boolean enforceLimits(BoundTransportAddress boundTransportAddress) {
Predicate<TransportAddress> isLoopbackOrLinkLocalAddress = t -> t.address().getAddress().isLinkLocalAddress()
|| t.address().getAddress().isLoopbackAddress();
return !(Arrays.stream(boundTransportAddress.boundAddresses()).allMatch(isLoopbackOrLinkLocalAddress) &&
isLoopbackOrLinkLocalAddress.test(boundTransportAddress.publishAddress()));
}
// the list of checks to execute
static List<BootstrapCheck> checks(final Settings settings) {
final List<BootstrapCheck> checks = new ArrayList<>();
checks.add(new HeapSizeCheck());
final FileDescriptorCheck fileDescriptorCheck
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
if (Constants.LINUX) {
checks.add(new MaxMapCountCheck());
}
checks.add(new ClientJvmCheck());
checks.add(new UseSerialGCCheck());
checks.add(new SystemCallFilterCheck(BootstrapSettings.SECCOMP_SETTING.get(settings)));
checks.add(new OnErrorCheck());
checks.add(new OnOutOfMemoryErrorCheck());
checks.add(new G1GCCheck());
return Collections.unmodifiableList(checks);
}
static class HeapSizeCheck implements BootstrapCheck {
@Override
public boolean check() {
final long initialHeapSize = getInitialHeapSize();
final long maxHeapSize = getMaxHeapSize();
return initialHeapSize != 0 && maxHeapSize != 0 && initialHeapSize != maxHeapSize;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"initial heap size [%d] not equal to maximum heap size [%d]; " +
"this can cause resize pauses and prevents mlockall from locking the entire heap",
getInitialHeapSize(),
getMaxHeapSize()
);
}
// visible for testing
long getInitialHeapSize() {
return JvmInfo.jvmInfo().getConfiguredInitialHeapSize();
}
// visible for testing
long getMaxHeapSize() {
return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
}
}
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
public OsXFileDescriptorCheck() {
// see constant OPEN_MAX defined in
// /usr/include/sys/syslimits.h on OS X and its use in JVM
// initialization in int os:init_2(void) defined in the JVM
// code for BSD (contains OS X)
super(10240);
}
}
static class FileDescriptorCheck implements BootstrapCheck {
private final int limit;
FileDescriptorCheck() {
this(1 << 16);
}
protected FileDescriptorCheck(final int limit) {
if (limit <= 0) {
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
}
this.limit = limit;
}
public final boolean check() {
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
}
@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process is too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}
// visible for testing
long getMaxFileDescriptorCount() {
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
}
}
static class MlockallCheck implements BootstrapCheck {
private final boolean mlockallSet;
public MlockallCheck(final boolean mlockAllSet) {
this.mlockallSet = mlockAllSet;
}
@Override
public boolean check() {
return mlockallSet && !isMemoryLocked();
}
@Override
public String errorMessage() {
return "memory locking requested for elasticsearch process but memory is not locked";
}
// visible for testing
boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
static class MaxNumberOfThreadsCheck implements BootstrapCheck {
// this should be plenty for machines up to 256 cores
private final long maxNumberOfThreadsThreshold = 1 << 12;
@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] is too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}
// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
}
static class MaxSizeVirtualMemoryCheck implements BootstrapCheck {
@Override
public boolean check() {
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max size virtual memory [%d] for user [%s] is too low, increase to [unlimited]",
getMaxSizeVirtualMemory(),
BootstrapInfo.getSystemProperties().get("user.name"));
}
// visible for testing
long getRlimInfinity() {
return JNACLibrary.RLIM_INFINITY;
}
// visible for testing
long getMaxSizeVirtualMemory() {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
}
}
static class MaxMapCountCheck implements BootstrapCheck {
private final long limit = 1 << 18;
@Override
public boolean check() {
return getMaxMapCount() != -1 && getMaxMapCount() < limit;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max virtual memory areas vm.max_map_count [%d] is too low, increase to at least [%d]",
getMaxMapCount(),
limit);
}
// visible for testing
long getMaxMapCount() {
return getMaxMapCount(Loggers.getLogger(BootstrapChecks.class));
}
// visible for testing
long getMaxMapCount(Logger logger) {
final Path path = getProcSysVmMaxMapCountPath();
try (final BufferedReader bufferedReader = getBufferedReader(path)) {
final String rawProcSysVmMaxMapCount = readProcSysVmMaxMapCount(bufferedReader);
if (rawProcSysVmMaxMapCount != null) {
try {
return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
} catch (final NumberFormatException e) {
logger.warn(
(Supplier<?>) () -> new ParameterizedMessage(
"unable to parse vm.max_map_count [{}]",
rawProcSysVmMaxMapCount),
e);
}
}
} catch (final IOException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
}
return -1;
}
@SuppressForbidden(reason = "access /proc/sys/vm/max_map_count")
private Path getProcSysVmMaxMapCountPath() {
return PathUtils.get("/proc/sys/vm/max_map_count");
}
// visible for testing
BufferedReader getBufferedReader(final Path path) throws IOException {
return Files.newBufferedReader(path);
}
// visible for testing
String readProcSysVmMaxMapCount(final BufferedReader bufferedReader) throws IOException {
return bufferedReader.readLine();
}
// visible for testing
long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws NumberFormatException {
return Long.parseLong(procSysVmMaxMapCount);
}
}
static class ClientJvmCheck implements BootstrapCheck {
@Override
public boolean check() {
return getVmName().toLowerCase(Locale.ROOT).contains("client");
}
// visible for testing
String getVmName() {
return JvmInfo.jvmInfo().getVmName();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the client VM [%s] but should be using a server VM for the best performance",
getVmName());
}
}
/**
* Checks if the serial collector is in use. This collector is single-threaded and devastating
* for performance and should not be used for a server application like Elasticsearch.
*/
static class UseSerialGCCheck implements BootstrapCheck {
@Override
public boolean check() {
return getUseSerialGC().equals("true");
}
// visible for testing
String getUseSerialGC() {
return JvmInfo.jvmInfo().useSerialGC();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM is using the serial collector but should not be for the best performance; " +
"either it's the default for the VM [%s] or -XX:+UseSerialGC was explicitly specified",
JvmInfo.jvmInfo().getVmName());
}
}
/**
     * Bootstrap check that, if system call filters are enabled, they were installed successfully.
*/
static class SystemCallFilterCheck implements BootstrapCheck {
private final boolean areSystemCallFiltersEnabled;
SystemCallFilterCheck(final boolean areSystemCallFiltersEnabled) {
this.areSystemCallFiltersEnabled = areSystemCallFiltersEnabled;
}
@Override
public boolean check() {
return areSystemCallFiltersEnabled && !isSeccompInstalled();
}
// visible for testing
boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
@Override
public String errorMessage() {
return "system call filters failed to install; " +
"check the logs and fix your configuration or disable system call filters at your own risk";
}
}
abstract static class MightForkCheck implements BootstrapCheck {
@Override
public boolean check() {
return isSeccompInstalled() && mightFork();
}
// visible for testing
boolean isSeccompInstalled() {
return Natives.isSeccompInstalled();
}
// visible for testing
abstract boolean mightFork();
@Override
public final boolean alwaysEnforce() {
return true;
}
}
static class OnErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onError = onError();
return onError != null && !onError.equals("");
}
// visible for testing
String onError() {
return JvmInfo.jvmInfo().onError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
static class OnOutOfMemoryErrorCheck extends MightForkCheck {
@Override
boolean mightFork() {
final String onOutOfMemoryError = onOutOfMemoryError();
return onOutOfMemoryError != null && !onOutOfMemoryError.equals("");
}
// visible for testing
String onOutOfMemoryError() {
return JvmInfo.jvmInfo().onOutOfMemoryError();
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
onOutOfMemoryError(),
BootstrapSettings.SECCOMP_SETTING.getKey());
}
}
/**
* Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled.
*/
static class G1GCCheck implements BootstrapCheck {
@Override
public boolean check() {
if ("Oracle Corporation".equals(jvmVendor()) && isJava8() && isG1GCEnabled()) {
final String jvmVersion = jvmVersion();
// HotSpot versions on Java 8 match this regular expression; note that this changes with Java 9 after JEP-223
final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)-b\\d+");
final Matcher matcher = pattern.matcher(jvmVersion);
final boolean matches = matcher.matches();
assert matches : jvmVersion;
final int major = Integer.parseInt(matcher.group(1));
final int update = Integer.parseInt(matcher.group(2));
                // HotSpot versions for Java 8 have major version 25; the bad versions are all updates prior to update 40
return major == 25 && update < 40;
} else {
return false;
}
}
// visible for testing
String jvmVendor() {
return Constants.JVM_VENDOR;
}
// visible for testing
boolean isG1GCEnabled() {
assert "Oracle Corporation".equals(jvmVendor());
return JvmInfo.jvmInfo().useG1GC().equals("true");
}
// visible for testing
String jvmVersion() {
assert "Oracle Corporation".equals(jvmVendor());
return Constants.JVM_VERSION;
}
// visible for testing
boolean isJava8() {
assert "Oracle Corporation".equals(jvmVendor());
return JavaVersion.current().equals(JavaVersion.parse("1.8"));
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"JVM version [%s] can cause data corruption when used with G1GC; upgrade to at least Java 8u40", jvmVersion());
}
}
}
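
All of the checks above share one contract: check() returns true when the node fails the check, and errorMessage() supplies the text reported on failure. A minimal sketch of a custom check against that contract (the swap probe is hypothetical and is not part of this change):

    // Sketch of a custom check, assuming the BootstrapCheck contract shown above;
    // isSwapEnabled() stands in for a real system query.
    static class SwapEnabledCheck implements BootstrapCheck {
        @Override
        public boolean check() {
            return isSwapEnabled(); // true means the check *fails*
        }
        // visible for testing
        boolean isSwapEnabled() {
            return false; // placeholder probe
        }
        @Override
        public String errorMessage() {
            return "swap is enabled for the elasticsearch process; disable swap or enable memory locking";
        }
    }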

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.InvalidAliasNameException;
import java.io.IOException;
import java.util.Optional;
import java.util.function.Function;
/**
@ -139,10 +138,8 @@ public class AliasValidator extends AbstractComponent {
private static void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
QueryParseContext queryParseContext = queryShardContext.newParseContext(parser);
Optional<QueryBuilder> parseInnerQueryBuilder = queryParseContext.parseInnerQueryBuilder();
if (parseInnerQueryBuilder.isPresent()) {
QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(parseInnerQueryBuilder.get(), queryShardContext);
queryBuilder.toFilter(queryShardContext);
}
QueryBuilder parseInnerQueryBuilder = queryParseContext.parseInnerQueryBuilder();
QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(parseInnerQueryBuilder, queryShardContext);
queryBuilder.toFilter(queryShardContext);
}
}

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -43,7 +44,6 @@ import java.util.EnumSet;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
/**
* A collection of tombstones for explicitly marking indices as deleted in the cluster state.
@ -367,7 +367,7 @@ public final class IndexGraveyard implements MetaData.Custom {
TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY));
}
static BiFunction<XContentParser, ParseFieldMatcherSupplier, Tombstone> getParser() {
static ContextParser<ParseFieldMatcherSupplier, Tombstone> getParser() {
return (p, c) -> TOMBSTONE_PARSER.apply(p, c).build();
}

View File

@ -753,80 +753,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return new Builder(metaData);
}
/** All known byte-sized cluster settings. */
public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));
/** All known time cluster settings. */
public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));
    /** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
     * specify a unit. */
public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
Settings.Builder newPersistentSettings = null;
for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
String settingName = ent.getKey();
String settingValue = ent.getValue();
if (CLUSTER_BYTES_SIZE_SETTINGS.contains(settingName)) {
try {
Long.parseLong(settingValue);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (bytes); now we add it:
logger.warn("byte-sized cluster setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error", settingName, settingValue);
if (newPersistentSettings == null) {
newPersistentSettings = Settings.builder();
newPersistentSettings.put(metaData.persistentSettings());
}
newPersistentSettings.put(settingName, settingValue + "b");
}
if (CLUSTER_TIME_SETTINGS.contains(settingName)) {
try {
Long.parseLong(settingValue);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (ms); now we add it:
logger.warn("time cluster setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error", settingName, settingValue);
if (newPersistentSettings == null) {
newPersistentSettings = Settings.builder();
newPersistentSettings.put(metaData.persistentSettings());
}
newPersistentSettings.put(settingName, settingValue + "ms");
}
}
if (newPersistentSettings != null) {
return new MetaData(
metaData.clusterUUID(),
metaData.version(),
metaData.transientSettings(),
newPersistentSettings.build(),
metaData.getIndices(),
metaData.getTemplates(),
metaData.getCustoms(),
metaData.getConcreteAllIndices(),
metaData.getConcreteAllOpenIndices(),
metaData.getConcreteAllClosedIndices(),
metaData.getAliasAndIndexLookup());
} else {
// No changes:
return metaData;
}
}
public static class Builder {
private String clusterUUID;

View File

@ -59,6 +59,54 @@ public class SameShardAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());
Decision decision = decideSameNode(shardRouting, node, allocation, assignedShards);
if (decision.type() == Decision.Type.NO || sameHost == false) {
            // if it's already a NO decision looking at the node, or we aren't configured to look at the host, return the decision
return decision;
}
if (node.node() != null) {
for (RoutingNode checkNode : allocation.routingNodes()) {
if (checkNode.node() == null) {
continue;
}
                // check if it's on the same host as the one we want to allocate to
boolean checkNodeOnSameHostName = false;
boolean checkNodeOnSameHostAddress = false;
if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {
if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {
checkNodeOnSameHostAddress = true;
}
} else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {
if (checkNode.node().getHostName().equals(node.node().getHostName())) {
checkNodeOnSameHostName = true;
}
}
if (checkNodeOnSameHostAddress || checkNodeOnSameHostName) {
for (ShardRouting assignedShard : assignedShards) {
if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
String hostType = checkNodeOnSameHostAddress ? "address" : "name";
String host = checkNodeOnSameHostAddress ? node.node().getHostAddress() : node.node().getHostName();
return allocation.decision(Decision.NO, NAME,
"the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; " +
"set [%s] to false to allow multiple nodes on the same host to hold the same shard copies",
hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());
}
}
}
}
}
return allocation.decision(Decision.YES, NAME, "the shard does not exist on the same host");
}
@Override
public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
assert shardRouting.primary() : "must not call force allocate on a non-primary shard";
Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());
return decideSameNode(shardRouting, node, allocation, assignedShards);
}
private Decision decideSameNode(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation,
Iterable<ShardRouting> assignedShards) {
for (ShardRouting assignedShard : assignedShards) {
if (node.nodeId().equals(assignedShard.currentNodeId())) {
if (assignedShard.isSameAllocation(shardRouting)) {
@ -72,39 +120,6 @@ public class SameShardAllocationDecider extends AllocationDecider {
}
}
}
if (sameHost) {
if (node.node() != null) {
for (RoutingNode checkNode : allocation.routingNodes()) {
if (checkNode.node() == null) {
continue;
}
                // check if it's on the same host as the one we want to allocate to
boolean checkNodeOnSameHostName = false;
boolean checkNodeOnSameHostAddress = false;
if (Strings.hasLength(checkNode.node().getHostAddress()) && Strings.hasLength(node.node().getHostAddress())) {
if (checkNode.node().getHostAddress().equals(node.node().getHostAddress())) {
checkNodeOnSameHostAddress = true;
}
} else if (Strings.hasLength(checkNode.node().getHostName()) && Strings.hasLength(node.node().getHostName())) {
if (checkNode.node().getHostName().equals(node.node().getHostName())) {
checkNodeOnSameHostName = true;
}
}
if (checkNodeOnSameHostAddress || checkNodeOnSameHostName) {
for (ShardRouting assignedShard : assignedShards) {
if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
String hostType = checkNodeOnSameHostAddress ? "address" : "name";
String host = checkNodeOnSameHostAddress ? node.node().getHostAddress() : node.node().getHostName();
return allocation.decision(Decision.NO, NAME,
"the shard cannot be allocated on host %s [%s], where it already exists on node [%s]; " +
"set [%s] to false to allow multiple nodes on the same host to hold the same shard copies",
hostType, host, node.nodeId(), CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey());
}
}
}
}
}
}
return allocation.decision(Decision.YES, NAME, "the shard does not exist on the same " + (sameHost ? "host" : "node"));
return allocation.decision(Decision.YES, NAME, "the shard does not exist on the same node");
}
}
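
The same-host branch above only runs when the decider is configured to check hosts. A sketch of opting in via the setting constant referenced in the decision messages (the key string is not shown here, so the constant is used directly):

    // Sketch: enable the same-host check; standard Settings builder usage.
    Settings settings = Settings.builder()
        .put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true)
        .build();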

View File

@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.util.function.Consumer;
/**
* A {@link Consumer}-like interface which allows throwing checked exceptions.
*/
@FunctionalInterface
public interface CheckedConsumer<T, E extends Exception> {
void accept(T t) throws E;
}
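
A plain java.util.function.Consumer cannot declare checked exceptions, so I/O lambdas would otherwise need try/catch wrappers. A usage sketch of the new interface (the helper method is illustrative):

    // Sketch: accepting a CheckedConsumer lets callers pass lambdas that throw
    // IOException without wrapping it in a RuntimeException.
    static void forEachLine(BufferedReader reader, CheckedConsumer<String, IOException> consumer) throws IOException {
        String line;
        while ((line = reader.readLine()) != null) {
            consumer.accept(line); // the checked IOException propagates naturally
        }
    }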

View File

@ -24,9 +24,11 @@ import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
import java.io.BufferedInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.NoSuchFileException;
import java.util.Map;
/**
@ -99,7 +101,11 @@ public class URLBlobContainer extends AbstractBlobContainer {
@Override
public InputStream readBlob(String name) throws IOException {
return new BufferedInputStream(new URL(path, name).openStream(), blobStore.bufferSizeInBytes());
try {
return new BufferedInputStream(new URL(path, name).openStream(), blobStore.bufferSizeInBytes());
} catch (FileNotFoundException fnfe) {
throw new NoSuchFileException("[" + name + "] blob not found");
}
}
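
With this change a missing blob surfaces as java.nio.file.NoSuchFileException instead of the URL-specific FileNotFoundException, so callers can handle a miss uniformly across blob store implementations. A caller-side sketch (the container and blob name are illustrative):

    try (InputStream stream = blobContainer.readBlob("index-latest")) {
        // consume the blob ...
    } catch (NoSuchFileException e) {
        // the blob does not exist; handle the miss instead of failing the request
    }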
@Override

View File

@ -22,6 +22,7 @@ package org.elasticsearch.common.joda;
import org.joda.time.format.DateTimeFormatter;
import java.util.Locale;
import java.util.Objects;
/**
* A simple wrapper around {@link DateTimeFormatter} that retains the
@ -43,9 +44,9 @@ public class FormatDateTimeFormatter {
public FormatDateTimeFormatter(String format, DateTimeFormatter parser, DateTimeFormatter printer, Locale locale) {
this.format = format;
this.locale = locale;
this.printer = locale == null ? printer.withDefaultYear(1970) : printer.withLocale(locale).withDefaultYear(1970);
this.parser = locale == null ? parser.withDefaultYear(1970) : parser.withLocale(locale).withDefaultYear(1970);
this.locale = Objects.requireNonNull(locale, "A locale is required as JODA otherwise uses the default locale");
this.printer = printer.withLocale(locale).withDefaultYear(1970);
this.parser = parser.withLocale(locale).withDefaultYear(1970);
}
public String format() {

View File

@ -19,14 +19,25 @@
package org.elasticsearch.common.lucene.uid;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import java.io.IOException;
import java.util.List;
@ -143,4 +154,115 @@ public class Versions {
final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term);
return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version;
}
/**
* Returns the sequence number for the given uid term, returning
* {@code SequenceNumbersService.UNASSIGNED_SEQ_NO} if none is found.
*/
public static long loadSeqNo(IndexReader reader, Term term) throws IOException {
assert term.field().equals(UidFieldMapper.NAME) : "can only load _seq_no by uid";
List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
LeafReader leaf = leaves.get(i).reader();
Bits liveDocs = leaf.getLiveDocs();
TermsEnum termsEnum = null;
SortedNumericDocValues dvField = null;
PostingsEnum docsEnum = null;
final Fields fields = leaf.fields();
if (fields != null) {
Terms terms = fields.terms(UidFieldMapper.NAME);
if (terms != null) {
termsEnum = terms.iterator();
assert termsEnum != null;
dvField = leaf.getSortedNumericDocValues(SeqNoFieldMapper.NAME);
assert dvField != null;
final BytesRef id = term.bytes();
if (termsEnum.seekExact(id)) {
// there may be more than one matching docID, in the
// case of nested docs, so we want the last one:
docsEnum = termsEnum.postings(docsEnum, 0);
int docID = DocIdSetIterator.NO_MORE_DOCS;
for (int d = docsEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
if (liveDocs != null && liveDocs.get(d) == false) {
continue;
}
docID = d;
}
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
dvField.setDocument(docID);
assert dvField.count() == 1 : "expected only a single value for _seq_no but got " +
dvField.count();
return dvField.valueAt(0);
}
}
}
}
}
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
/**
* Returns the primary term for the given uid term, returning {@code 0} if none is found.
*/
public static long loadPrimaryTerm(IndexReader reader, Term term) throws IOException {
assert term.field().equals(UidFieldMapper.NAME) : "can only load _primary_term by uid";
List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return 0;
}
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
LeafReader leaf = leaves.get(i).reader();
Bits liveDocs = leaf.getLiveDocs();
TermsEnum termsEnum = null;
NumericDocValues dvField = null;
PostingsEnum docsEnum = null;
final Fields fields = leaf.fields();
if (fields != null) {
Terms terms = fields.terms(UidFieldMapper.NAME);
if (terms != null) {
termsEnum = terms.iterator();
assert termsEnum != null;
dvField = leaf.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
assert dvField != null;
final BytesRef id = term.bytes();
if (termsEnum.seekExact(id)) {
// there may be more than one matching docID, in the
// case of nested docs, so we want the last one:
docsEnum = termsEnum.postings(docsEnum, 0);
int docID = DocIdSetIterator.NO_MORE_DOCS;
for (int d = docsEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
if (liveDocs != null && liveDocs.get(d) == false) {
continue;
}
docID = d;
}
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
return dvField.get(docID);
}
}
}
}
}
return 0;
}
}
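
Both loaders walk the segments newest-first, resolve the uid term to its last live document, and read a per-document value. A usage sketch, assuming an open IndexReader and illustrative type/id values:

    Term uid = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes("my_type", "my_id"));
    long seqNo = Versions.loadSeqNo(reader, uid);             // UNASSIGNED_SEQ_NO when absent
    long primaryTerm = Versions.loadPrimaryTerm(reader, uid); // 0 when absent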

View File

@ -87,7 +87,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_REFORMAT,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING,
IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING,

View File

@ -35,14 +35,7 @@ import java.util.function.BiFunction;
* Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared.
*/
public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatcherSupplier>
implements BiFunction<XContentParser, Context, Value> {
/**
* Reads an object from a parser using some context.
*/
@FunctionalInterface
public interface ContextParser<Context, T> {
T parse(XContentParser p, Context c) throws IOException;
}
implements BiFunction<XContentParser, Context, Value>, ContextParser<Context, Value> {
/**
* Reads an object right from the parser without any context.
@ -54,7 +47,7 @@ public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatc
/**
* Declare some field. Usually it is easier to use {@link #declareString(BiConsumer, ParseField)} or
* {@link #declareObject(BiConsumer, BiFunction, ParseField)} rather than call this directly.
* {@link #declareObject(BiConsumer, ContextParser, ParseField)} rather than call this directly.
*/
public abstract <T> void declareField(BiConsumer<Value, T> consumer, ContextParser<Context, T> parser, ParseField parseField,
ValueType type);
@ -66,8 +59,8 @@ public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatc
declareField(consumer, (p, c) -> parser.parse(p), parseField, type);
}
public <T> void declareObject(BiConsumer<Value, T> consumer, BiFunction<XContentParser, Context, T> objectParser, ParseField field) {
declareField(consumer, (p, c) -> objectParser.apply(p, c), field, ValueType.OBJECT);
public <T> void declareObject(BiConsumer<Value, T> consumer, ContextParser<Context, T> objectParser, ParseField field) {
declareField(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT);
}
public void declareFloat(BiConsumer<Value, Float> consumer, ParseField field) {
@ -103,9 +96,9 @@ public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatc
declareField(consumer, XContentParser::booleanValue, field, ValueType.BOOLEAN);
}
public <T> void declareObjectArray(BiConsumer<Value, List<T>> consumer, BiFunction<XContentParser, Context, T> objectParser,
public <T> void declareObjectArray(BiConsumer<Value, List<T>> consumer, ContextParser<Context, T> objectParser,
ParseField field) {
declareField(consumer, (p, c) -> parseArray(p, () -> objectParser.apply(p, c)), field, ValueType.OBJECT_ARRAY);
declareField(consumer, (p, c) -> parseArray(p, () -> objectParser.parse(p, c)), field, ValueType.OBJECT_ARRAY);
}
public void declareStringArray(BiConsumer<Value, List<String>> consumer, ParseField field) {
@ -144,7 +137,7 @@ public abstract class AbstractObjectParser<Value, Context extends ParseFieldMatc
}
private static <T> List<T> parseArray(XContentParser parser, IOSupplier<T> supplier) throws IOException {
List<T> list = new ArrayList<>();
if (parser.currentToken().isValue()) {
if (parser.currentToken().isValue() || parser.currentToken() == XContentParser.Token.START_OBJECT) {
list.add(supplier.get()); // single value
} else {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {

View File

@ -140,12 +140,17 @@ public final class ConstructingObjectParser<Value, Context extends ParseFieldMat
@Override
public Value apply(XContentParser parser, Context context) {
try {
return objectParser.parse(parser, new Target(parser), context).finish();
return parse(parser, context);
} catch (IOException e) {
throw new ParsingException(parser.getTokenLocation(), "[" + objectParser.getName() + "] failed to parse object", e);
}
}
@Override
public Value parse(XContentParser parser, Context context) throws IOException {
return objectParser.parse(parser, new Target(parser), context).finish();
}
/**
* Pass the {@linkplain BiConsumer} this returns the declare methods to declare a required constructor argument. See this class's
* javadoc for an example. The order in which these are declared matters: it is the order that they come in the array passed to

View File

@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent;
import java.io.IOException;
/**
* Reads an object from a parser using some context.
*/
@FunctionalInterface
public interface ContextParser<Context, T> {
T parse(XContentParser p, Context c) throws IOException;
}
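
Extracting the interface to its own top-level type lets parsers be written as plain lambdas over (parser, context), as in IndexGraveyard.getParser() above. A minimal sketch:

    // Sketch: any (XContentParser, Context) -> T lambda is now a ContextParser.
    ContextParser<ParseFieldMatcherSupplier, String> nameParser = (p, c) -> p.text();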

View File

@ -47,8 +47,8 @@ import static org.elasticsearch.common.xcontent.XContentParser.Token.VALUE_STRIN
/**
 * A declarative, stateless parser that turns XContent into setter calls. A single parser should be defined for each object being parsed;
* nested elements can be added via {@link #declareObject(BiConsumer, BiFunction, ParseField)} which should be satisfied where possible by
* passing another instance of {@link ObjectParser}, this one customized for that Object.
* nested elements can be added via {@link #declareObject(BiConsumer, ContextParser, ParseField)} which should be satisfied where possible
* by passing another instance of {@link ObjectParser}, this one customized for that Object.
* <p>
 * This class works well for objects that do have a constructor argument or that can be built using information available from earlier in
 * the XContent. For objects that have constructors with required arguments that are specified on the same level as other fields see
@ -126,6 +126,7 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
* @return a new value instance drawn from the provided value supplier on {@link #ObjectParser(String, Supplier)}
* @throws IOException if an IOException occurs.
*/
@Override
public Value parse(XContentParser parser, Context context) throws IOException {
if (valueSupplier == null) {
throw new NullPointerException("valueSupplier is not set");
@ -463,6 +464,7 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
OBJECT_ARRAY(START_OBJECT, START_ARRAY),
OBJECT_OR_BOOLEAN(START_OBJECT, VALUE_BOOLEAN),
OBJECT_OR_STRING(START_OBJECT, VALUE_STRING),
OBJECT_ARRAY_OR_STRING(START_OBJECT, START_ARRAY, VALUE_STRING),
VALUE(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING);
private final EnumSet<XContentParser.Token> tokens;
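
The new OBJECT_ARRAY_OR_STRING value type accepts a single object, an array of objects, or a bare string token for the same declared field. A declaration sketch (the field name and per-element parsing helper are hypothetical):

    PARSER.declareField(MyObject::setThings,
        (p, c) -> parseThing(p),                 // hypothetical per-element parser
        new ParseField("things"),
        ObjectParser.ValueType.OBJECT_ARRAY_OR_STRING);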

View File

@ -72,6 +72,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
@ -207,7 +208,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
joinThreadControl.start();
zenPing.start(this);
this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings);
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::rejoin, logger);
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger);
}
@Override
@ -306,18 +307,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
} catch (FailedToCommitClusterStateException t) {
// cluster service logs a WARN message
logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes());
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
return rejoin(currentState, "failed to publish to min_master_nodes");
}
@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
submitRejoin("zen-disco-failed-to-publish");
throw t;
}
@ -505,12 +495,27 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
}
private void submitRejoin(String source) {
clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
return rejoin(currentState, source);
}
@Override
public void onFailure(String source, Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
}
// visible for testing
static class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExecutor<NodeRemovalClusterStateTaskExecutor.Task>, ClusterStateTaskListener {
private final AllocationService allocationService;
private final ElectMasterService electMasterService;
private final BiFunction<ClusterState, String, ClusterState> rejoin;
private final Consumer<String> rejoin;
private final Logger logger;
static class Task {
@ -540,7 +545,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
NodeRemovalClusterStateTaskExecutor(
final AllocationService allocationService,
final ElectMasterService electMasterService,
final BiFunction<ClusterState, String, ClusterState> rejoin,
final Consumer<String> rejoin,
final Logger logger) {
this.allocationService = allocationService;
this.electMasterService = electMasterService;
@ -570,7 +575,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
final BatchResult.Builder<Task> resultBuilder = BatchResult.<Task>builder().successes(tasks);
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
rejoin.accept("not enough master nodes");
return resultBuilder.build(currentState);
} else {
return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
}

View File

@ -106,14 +106,7 @@ public class MetaStateService extends AbstractComponent {
* Loads the global state, *without* index state, see {@link #loadFullState()} for that.
*/
MetaData loadGlobalState() throws IOException {
MetaData globalState = MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths());
// ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing
// TODO: can we somehow only do this for pre-2.0 cluster state?
if (globalState != null) {
return MetaData.addDefaultUnitsIfNeeded(logger, globalState);
} else {
return null;
}
return MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths());
}
/**

View File

@ -25,14 +25,14 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.shard.SearchOperationListener;
import org.elasticsearch.search.internal.SearchContext;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
public final class SearchSlowLog implements SearchOperationListener {
private boolean reformat;
private long queryWarnThreshold;
private long queryInfoThreshold;
private long queryDebugThreshold;
@ -73,20 +73,17 @@ public final class SearchSlowLog implements SearchOperationListener {
public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING =
Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1),
TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_SEARCH_SLOWLOG_REFORMAT =
Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, Property.Dynamic, Property.IndexScope);
public static final Setting<SlowLogLevel> INDEX_SEARCH_SLOWLOG_LEVEL =
new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic,
Property.IndexScope);
private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false"));
public SearchSlowLog(IndexSettings indexSettings) {
this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings());
this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings());
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_REFORMAT, this::setReformat);
this.reformat = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_REFORMAT);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold);
this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, this::setQueryInfoThreshold);
@ -117,38 +114,36 @@ public final class SearchSlowLog implements SearchOperationListener {
@Override
public void onQueryPhase(SearchContext context, long tookInNanos) {
if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
}
}
@Override
public void onFetchPhase(SearchContext context, long tookInNanos) {
if (fetchWarnThreshold >= 0 && tookInNanos > fetchWarnThreshold) {
fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
fetchLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (fetchInfoThreshold >= 0 && tookInNanos > fetchInfoThreshold) {
fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
fetchLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (fetchDebugThreshold >= 0 && tookInNanos > fetchDebugThreshold) {
fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
fetchLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
} else if (fetchTraceThreshold >= 0 && tookInNanos > fetchTraceThreshold) {
fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
fetchLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos));
}
}
static final class SlowLogSearchContextPrinter {
private final SearchContext context;
private final long tookInNanos;
private final boolean reformat;
public SlowLogSearchContextPrinter(SearchContext context, long tookInNanos, boolean reformat) {
public SlowLogSearchContextPrinter(SearchContext context, long tookInNanos) {
this.context = context;
this.tookInNanos = tookInNanos;
this.reformat = reformat;
}
@Override
@ -172,7 +167,7 @@ public final class SearchSlowLog implements SearchOperationListener {
}
sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], ");
if (context.request().source() != null) {
sb.append("source[").append(context.request().source()).append("], ");
sb.append("source[").append(context.request().source().toString(FORMAT_PARAMS)).append("], ");
} else {
sb.append("source[], ");
}
@ -180,10 +175,6 @@ public final class SearchSlowLog implements SearchOperationListener {
}
}
private void setReformat(boolean reformat) {
this.reformat = reformat;
}
private void setQueryWarnThreshold(TimeValue warnThreshold) {
this.queryWarnThreshold = warnThreshold.nanos();
}
@ -216,10 +207,6 @@ public final class SearchSlowLog implements SearchOperationListener {
this.fetchTraceThreshold = traceThreshold.nanos();
}
boolean isReformat() {
return reformat;
}
long getQueryWarnThreshold() {
return queryWarnThreshold;
}
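
With the reformat option gone, slow-log lines always render the search source single-line through FORMAT_PARAMS; the thresholds remain dynamic index settings and can still be updated at runtime, for example (the value is illustrative):

    Settings update = Settings.builder()
        .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "10s")
        .build();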

View File

@ -47,6 +47,7 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -925,13 +926,15 @@ public abstract class Engine implements Closeable {
private final Term uid;
private final long version;
private final long seqNo;
private final long primaryTerm;
private final VersionType versionType;
private final Origin origin;
private final long startTime;
public Operation(Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime) {
public Operation(Term uid, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin, long startTime) {
this.uid = uid;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.version = version;
this.versionType = versionType;
this.origin = origin;
@ -965,6 +968,10 @@ public abstract class Engine implements Closeable {
return seqNo;
}
public long primaryTerm() {
return primaryTerm;
}
public abstract int estimatedSizeInBytes();
public VersionType versionType() {
@ -991,9 +998,9 @@ public abstract class Engine implements Closeable {
private final long autoGeneratedIdTimestamp;
private final boolean isRetry;
public Index(Term uid, ParsedDocument doc, long seqNo, long version, VersionType versionType, Origin origin, long startTime,
long autoGeneratedIdTimestamp, boolean isRetry) {
super(uid, seqNo, version, versionType, origin, startTime);
public Index(Term uid, ParsedDocument doc, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin,
long startTime, long autoGeneratedIdTimestamp, boolean isRetry) {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
this.doc = doc;
this.isRetry = isRetry;
this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
@ -1004,7 +1011,8 @@ public abstract class Engine implements Closeable {
} // TEST ONLY
Index(Term uid, ParsedDocument doc, long version) {
this(uid, doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime(), -1, false);
this(uid, doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, version, VersionType.INTERNAL,
Origin.PRIMARY, System.nanoTime(), -1, false);
}
public ParsedDocument parsedDoc() {
@ -1071,18 +1079,20 @@ public abstract class Engine implements Closeable {
private final String type;
private final String id;
public Delete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Origin origin, long startTime) {
super(uid, seqNo, version, versionType, origin, startTime);
public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType,
Origin origin, long startTime) {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
this.type = type;
this.id = id;
}
public Delete(String type, String id, Term uid) {
this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
this(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
}
public Delete(Delete template, VersionType versionType) {
this(template.type(), template.id(), template.uid(), template.seqNo(), template.version(), versionType, template.origin(), template.startTime());
this(template.type(), template.id(), template.uid(), template.seqNo(), template.primaryTerm(), template.version(),
versionType, template.origin(), template.startTime());
}
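
Every Operation now carries its primary term alongside the sequence number, threaded through the Index and Delete constructors. A construction sketch showing where the new argument sits (the uid and values are illustrative):

    Engine.Delete delete = new Engine.Delete("my_type", "my_id", uid,
        SequenceNumbersService.UNASSIGNED_SEQ_NO, /* primaryTerm */ 1L,
        Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime());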
@Override

View File

@ -650,6 +650,16 @@ public class InternalEngine extends Engine {
}
updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
index.parsedDoc().version().setLongValue(updatedVersion);
            // Update the document's sequence number and primary term; the
            // sequence number is derived from the sequence number service if
            // this is on the primary, or from the existing document's sequence
            // number if this is on the replica. The primary term has already
            // been set; see IndexShard.prepareIndex, where the Engine.Index
            // operation is created.
index.parsedDoc().updateSeqID(seqNo, index.primaryTerm());
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
// document does not exists, we can optimize for create, but double check if assertions are running
assert assertDocDoesNotExist(index, canOptimizeAddDocument == false);

View File

@ -117,6 +117,9 @@ public class DocumentMapperParser {
MetadataFieldMapper.TypeParser typeParser = rootTypeParsers.get(fieldName);
if (typeParser != null) {
iterator.remove();
if (false == fieldNode instanceof Map) {
throw new IllegalArgumentException("[_parent] must be an object containing [type]");
}
Map<String, Object> fieldNodeMap = (Map<String, Object>) fieldNode;
docBuilder.put(typeParser.parse(fieldName, fieldNodeMap, parserContext));
fieldNodeMap.remove("type");

View File

@ -148,7 +148,7 @@ final class DocumentParser {
private static ParsedDocument parsedDocument(SourceToParse source, ParseContext.InternalParseContext context, Mapping update) {
return new ParsedDocument(
context.version(),
context.seqNo(),
context.seqID(),
context.sourceToParse().id(),
context.sourceToParse().type(),
source.routing(),

View File

@ -205,15 +205,13 @@ public class DynamicTemplate implements ToXContent {
try {
xcontentFieldType = XContentFieldType.fromString(matchMappingType);
} catch (IllegalArgumentException e) {
// TODO: do this in 6.0
/*if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) {
if (indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
throw e;
}*/
DEPRECATION_LOGGER.deprecated("Ignoring unrecognized match_mapping_type: [" + matchMappingType + "]");
// this template is on an unknown type so it will never match anything
// null indicates that the template should be ignored
return null;
} else {
// this template is on an unknown type so it will never match anything
// null indicates that the template should be ignored
return null;
}
}
}
return new DynamicTemplate(name, pathMatch, pathUnmatch, match, unmatch, xcontentFieldType, MatchType.fromString(matchPattern), mapping);

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.all.AllEntries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import java.util.ArrayList;
import java.util.Iterator;
@ -254,13 +255,13 @@ public abstract class ParseContext {
}
@Override
public Field seqNo() {
return in.seqNo();
public SeqNoFieldMapper.SequenceID seqID() {
return in.seqID();
}
@Override
public void seqNo(Field seqNo) {
in.seqNo(seqNo);
public void seqID(SeqNoFieldMapper.SequenceID seqID) {
in.seqID(seqID);
}
@Override
@ -310,7 +311,7 @@ public abstract class ParseContext {
private Field version;
private Field seqNo;
private SeqNoFieldMapper.SequenceID seqID;
private final AllEntries allEntries;
@ -404,16 +405,15 @@ public abstract class ParseContext {
}
@Override
public Field seqNo() {
return this.seqNo;
public SeqNoFieldMapper.SequenceID seqID() {
return this.seqID;
}
@Override
public void seqNo(Field seqNo) {
this.seqNo = seqNo;
public void seqID(SeqNoFieldMapper.SequenceID seqID) {
this.seqID = seqID;
}
@Override
public AllEntries allEntries() {
return this.allEntries;
@ -540,9 +540,9 @@ public abstract class ParseContext {
public abstract void version(Field version);
public abstract Field seqNo();
public abstract SeqNoFieldMapper.SequenceID seqID();
public abstract void seqNo(Field seqNo);
public abstract void seqID(SeqNoFieldMapper.SequenceID seqID);
public final boolean includeInAll(Boolean includeInAll, FieldMapper mapper) {
return includeInAll(includeInAll, mapper.fieldType().indexOptions() != IndexOptions.NONE);

View File

@ -23,6 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import java.util.List;
@ -35,7 +36,7 @@ public class ParsedDocument {
private final String id, type;
private final BytesRef uid;
private final Field seqNo;
private final SeqNoFieldMapper.SequenceID seqID;
private final String routing;
@ -47,17 +48,16 @@ public class ParsedDocument {
private String parent;
public ParsedDocument(
Field version,
Field seqNo,
String id,
String type,
String routing,
List<Document> documents,
BytesReference source,
Mapping dynamicMappingsUpdate) {
public ParsedDocument(Field version,
SeqNoFieldMapper.SequenceID seqID,
String id,
String type,
String routing,
List<Document> documents,
BytesReference source,
Mapping dynamicMappingsUpdate) {
this.version = version;
this.seqNo = seqNo;
this.seqID = seqID;
this.id = id;
this.type = type;
this.uid = Uid.createUidAsBytes(type, id);
@ -83,8 +83,10 @@ public class ParsedDocument {
return version;
}
public Field seqNo() {
return seqNo;
public void updateSeqID(long sequenceNumber, long primaryTerm) {
this.seqID.seqNo.setLongValue(sequenceNumber);
this.seqID.seqNoDocValue.setLongValue(sequenceNumber);
this.seqID.primaryTerm.setLongValue(primaryTerm);
}
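
Replacing the single seqNo field with a SequenceID bundle lets the engine rewrite all three Lucene field instances in place once the real values are known. A sketch of the flow (the variable names are illustrative; see SeqNoFieldMapper below for the field wiring):

    // Documents are parsed with an unassigned SequenceID; the engine later
    // fills in the assigned values via updateSeqID (see InternalEngine above).
    SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    // ... the three fields are added to the Lucene document during parsing ...
    parsedDocument.updateSeqID(assignedSeqNo, currentPrimaryTerm); // rewrites in place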
public String routing() {

View File

@ -0,0 +1,298 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Mapper for the {@code _seq_no} field.
*
 * We expect to use the seq# for sorting, for collision checking, and for
 * doing range searches. Therefore the {@code _seq_no} field is stored both
 * as a numeric doc value and as a numeric indexed field.
 *
 * This mapper also manages the primary term field, which has no named
 * Elasticsearch equivalent. The primary term is only used during collision
 * resolution, after receiving identical seq# values for two document copies.
 * The primary term is stored as a doc value field without being indexed,
 * since it is only intended for use as a key-value lookup.
*/
public class SeqNoFieldMapper extends MetadataFieldMapper {
/**
* A sequence ID, which is made up of a sequence number (both the searchable
* and doc_value version of the field) and the primary term.
*/
public static class SequenceID {
public final Field seqNo;
public final Field seqNoDocValue;
public final Field primaryTerm;
public SequenceID(Field seqNo, Field seqNoDocValue, Field primaryTerm) {
Objects.requireNonNull(seqNo, "sequence number field cannot be null");
Objects.requireNonNull(seqNoDocValue, "sequence number dv field cannot be null");
Objects.requireNonNull(primaryTerm, "primary term field cannot be null");
this.seqNo = seqNo;
this.seqNoDocValue = seqNoDocValue;
this.primaryTerm = primaryTerm;
}
public static SequenceID emptySeqID() {
return new SequenceID(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
new SortedNumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
new NumericDocValuesField(PRIMARY_TERM_NAME, 0));
}
}
public static final String NAME = "_seq_no";
public static final String CONTENT_TYPE = "_seq_no";
public static final String PRIMARY_TERM_NAME = "_primary_term";
public static class SeqNoDefaults {
public static final String NAME = SeqNoFieldMapper.NAME;
public static final MappedFieldType FIELD_TYPE = new SeqNoFieldType();
static {
FIELD_TYPE.setName(NAME);
FIELD_TYPE.setDocValuesType(DocValuesType.SORTED);
FIELD_TYPE.setHasDocValues(true);
FIELD_TYPE.freeze();
}
}
public static class Builder extends MetadataFieldMapper.Builder<Builder, SeqNoFieldMapper> {
public Builder() {
super(SeqNoDefaults.NAME, SeqNoDefaults.FIELD_TYPE, SeqNoDefaults.FIELD_TYPE);
}
@Override
public SeqNoFieldMapper build(BuilderContext context) {
return new SeqNoFieldMapper(context.indexSettings());
}
}
public static class TypeParser implements MetadataFieldMapper.TypeParser {
@Override
public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
throws MapperParsingException {
throw new MapperParsingException(NAME + " is not configurable");
}
@Override
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
return new SeqNoFieldMapper(indexSettings);
}
}
static final class SeqNoFieldType extends MappedFieldType {
public SeqNoFieldType() {
}
protected SeqNoFieldType(SeqNoFieldType ref) {
super(ref);
}
@Override
public MappedFieldType clone() {
return new SeqNoFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
private long parse(Object value) {
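// accept any Number (rejecting fractional and out-of-long-range values), raw bytes, or a numeric string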
if (value instanceof Number) {
double doubleValue = ((Number) value).doubleValue();
if (doubleValue < Long.MIN_VALUE || doubleValue > Long.MAX_VALUE) {
throw new IllegalArgumentException("Value [" + value + "] is out of range for a long");
}
if (doubleValue % 1 != 0) {
throw new IllegalArgumentException("Value [" + value + "] has a decimal part");
}
return ((Number) value).longValue();
}
if (value instanceof BytesRef) {
value = ((BytesRef) value).utf8ToString();
}
return Long.parseLong(value.toString());
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
long v = parse(value);
return LongPoint.newExactQuery(name(), v);
}
@Override
public Query termsQuery(List<?> values, @Nullable QueryShardContext context) {
long[] v = new long[values.size()];
for (int i = 0; i < values.size(); ++i) {
v[i] = parse(values.get(i));
}
return LongPoint.newSetQuery(name(), v);
}
@Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower,
boolean includeUpper, QueryShardContext context) {
long l = Long.MIN_VALUE;
long u = Long.MAX_VALUE;
if (lowerTerm != null) {
l = parse(lowerTerm);
if (includeLower == false) {
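// exclusive lower bound: nothing can sit above Long.MAX_VALUE, otherwise step past the term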
if (l == Long.MAX_VALUE) {
return new MatchNoDocsQuery();
}
++l;
}
}
if (upperTerm != null) {
u = parse(upperTerm);
if (includeUpper == false) {
if (u == Long.MIN_VALUE) {
return new MatchNoDocsQuery();
}
--u;
}
}
return LongPoint.newRangeQuery(name(), l, u);
}
@Override
public IndexFieldData.Builder fielddataBuilder() {
failIfNoDocValues();
return new DocValuesIndexFieldData.Builder().numericType(NumericType.LONG);
}
@Override
public FieldStats stats(IndexReader reader) throws IOException {
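// derive the stats from per-segment point metadata (BKD index) rather than scanning doc values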
String fieldName = name();
long size = PointValues.size(reader, fieldName);
if (size == 0) {
return null;
}
int docCount = PointValues.getDocCount(reader, fieldName);
byte[] min = PointValues.getMinPackedValue(reader, fieldName);
byte[] max = PointValues.getMaxPackedValue(reader, fieldName);
return new FieldStats.Long(reader.maxDoc(), docCount, -1L, size, true, false,
LongPoint.decodeDimension(min, 0), LongPoint.decodeDimension(max, 0));
}
}
public SeqNoFieldMapper(Settings indexSettings) {
super(NAME, SeqNoDefaults.FIELD_TYPE, SeqNoDefaults.FIELD_TYPE, indexSettings);
}
@Override
public void preParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
// the real sequence number is set later; see InternalEngine.innerIndex and
// ParsedDocument.updateSeqID (called by innerIndex)
SequenceID seqID = SequenceID.emptySeqID();
context.seqID(seqID);
fields.add(seqID.seqNo);
fields.add(seqID.seqNoDocValue);
fields.add(seqID.primaryTerm);
}
@Override
public Mapper parse(ParseContext context) throws IOException {
// fields are added in parseCreateField
return null;
}
@Override
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, fill them with seqNo=1 and primaryTerm=0 so
// that Lucene doesn't have to write a Bitset for documents that lack the
// field; this matches the default value and keeps the encoding efficient.
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new LongPoint(NAME, 1));
doc.add(new SortedNumericDocValuesField(NAME, 1L));
doc.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0L));
}
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder;
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}
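
Aside: a minimal, hypothetical sketch (plain Lucene, not part of this change;
the index setup, field name and values are made up) of the dual-field strategy
the javadoc above describes: the point field answers range queries, while the
doc value makes the same number sortable.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DualFieldSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (long seqNo = 0; seqNo < 10; seqNo++) {
                Document doc = new Document();
                doc.add(new LongPoint("seq_no", seqNo));             // searchable: range/term queries
                doc.add(new NumericDocValuesField("seq_no", seqNo)); // sortable: per-doc lookup
                writer.addDocument(doc);
            }
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // the range query runs against the point index, the sort against the doc values
            searcher.search(LongPoint.newRangeQuery("seq_no", 3, 7), 10,
                    new Sort(new SortField("seq_no", SortField.Type.LONG)));
        }
    }
}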

View File

@ -1,197 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper.internal;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Bits;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/** Mapper for the _seq_no field. */
public class SeqNoFieldMapper extends MetadataFieldMapper {
public static final String NAME = "_seq_no";
public static final String CONTENT_TYPE = "_seq_no";
public static class Defaults {
public static final String NAME = SeqNoFieldMapper.NAME;
public static final MappedFieldType FIELD_TYPE = new SeqNoFieldType();
static {
FIELD_TYPE.setName(NAME);
FIELD_TYPE.setDocValuesType(DocValuesType.NUMERIC);
FIELD_TYPE.setHasDocValues(true);
FIELD_TYPE.freeze();
}
}
public static class Builder extends MetadataFieldMapper.Builder<Builder, SeqNoFieldMapper> {
public Builder() {
super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
@Override
public SeqNoFieldMapper build(BuilderContext context) {
return new SeqNoFieldMapper(context.indexSettings());
}
}
public static class TypeParser implements MetadataFieldMapper.TypeParser {
@Override
public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
throws MapperParsingException {
throw new MapperParsingException(NAME + " is not configurable");
}
@Override
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
return new SeqNoFieldMapper(indexSettings);
}
}
static final class SeqNoFieldType extends MappedFieldType {
public SeqNoFieldType() {
}
protected SeqNoFieldType(SeqNoFieldType ref) {
super(ref);
}
@Override
public MappedFieldType clone() {
return new SeqNoFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
throw new QueryShardException(context, "SeqNoField field [" + name() + "] is not searchable");
}
@Override
public FieldStats stats(IndexReader reader) throws IOException {
// TODO: remove implementation when late-binding commits are possible
final List<LeafReaderContext> leaves = reader.leaves();
if (leaves.isEmpty()) {
return null;
}
long currentMin = Long.MAX_VALUE;
long currentMax = Long.MIN_VALUE;
boolean found = false;
for (int i = 0; i < leaves.size(); i++) {
final LeafReader leaf = leaves.get(i).reader();
final NumericDocValues values = leaf.getNumericDocValues(name());
if (values == null) continue;
final Bits bits = leaf.getLiveDocs();
for (int docID = 0; docID < leaf.maxDoc(); docID++) {
if (bits == null || bits.get(docID)) {
found = true;
final long value = values.get(docID);
currentMin = Math.min(currentMin, value);
currentMax = Math.max(currentMax, value);
}
}
}
return found ? new FieldStats.Long(reader.maxDoc(), 0, -1, -1, false, true, currentMin, currentMax) : null;
}
}
public SeqNoFieldMapper(Settings indexSettings) {
super(NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE, indexSettings);
}
@Override
public void preParse(ParseContext context) throws IOException {
super.parse(context);
}
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
// see InternalEngine.updateVersion to see where the real version value is set
final Field seqNo = new NumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO);
context.seqNo(seqNo);
fields.add(seqNo);
}
@Override
public Mapper parse(ParseContext context) throws IOException {
// _seq_no added in pre-parse
return null;
}
@Override
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, fill them with seqNo=1 so that Lucene doesn't write a Bitset for documents
// that lack the field; this matches the default value and keeps the encoding efficient.
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new NumericDocValuesField(NAME, 1L));
}
}
@Override
protected String contentType() {
return CONTENT_TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder;
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}
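
For contrast with the per-document doc-values scan in the stats() deleted
above, a hedged sketch of how the replacement implementation reads the same
numbers from segment-level point metadata (reader and field name as in this
diff):

long size = PointValues.size(reader, "_seq_no");           // indexed point count
int docCount = PointValues.getDocCount(reader, "_seq_no"); // docs carrying the field
long min = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, "_seq_no"), 0);
long max = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, "_seq_no"), 0);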

View File

@ -37,7 +37,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
@ -299,7 +298,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
builder.endArray();
}
public static Optional<BoolQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
public static BoolQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
XContentParser parser = parseContext.parser();
boolean disableCoord = BoolQueryBuilder.DISABLE_COORD_DEFAULT;
@ -323,17 +322,17 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
} else if (token == XContentParser.Token.START_OBJECT) {
switch (currentFieldName) {
case MUST:
parseContext.parseInnerQueryBuilder().ifPresent(mustClauses::add);
mustClauses.add(parseContext.parseInnerQueryBuilder());
break;
case SHOULD:
parseContext.parseInnerQueryBuilder().ifPresent(shouldClauses::add);
shouldClauses.add(parseContext.parseInnerQueryBuilder());
break;
case FILTER:
parseContext.parseInnerQueryBuilder().ifPresent(filterClauses::add);
filterClauses.add(parseContext.parseInnerQueryBuilder());
break;
case MUST_NOT:
case MUSTNOT:
parseContext.parseInnerQueryBuilder().ifPresent(mustNotClauses::add);
mustNotClauses.add(parseContext.parseInnerQueryBuilder());
break;
default:
throw new ParsingException(parser.getTokenLocation(), "[bool] query does not support [" + currentFieldName + "]");
@ -342,17 +341,17 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
switch (currentFieldName) {
case MUST:
parseContext.parseInnerQueryBuilder().ifPresent(mustClauses::add);
mustClauses.add(parseContext.parseInnerQueryBuilder());
break;
case SHOULD:
parseContext.parseInnerQueryBuilder().ifPresent(shouldClauses::add);
shouldClauses.add(parseContext.parseInnerQueryBuilder());
break;
case FILTER:
parseContext.parseInnerQueryBuilder().ifPresent(filterClauses::add);
filterClauses.add(parseContext.parseInnerQueryBuilder());
break;
case MUST_NOT:
case MUSTNOT:
parseContext.parseInnerQueryBuilder().ifPresent(mustNotClauses::add);
mustNotClauses.add(parseContext.parseInnerQueryBuilder());
break;
default:
throw new ParsingException(parser.getTokenLocation(), "bool query does not support [" + currentFieldName + "]");
@ -394,7 +393,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
boolQuery.adjustPureNegative(adjustPureNegative);
boolQuery.minimumNumberShouldMatch(minimumShouldMatch);
boolQuery.queryName(queryName);
return Optional.of(boolQuery);
return boolQuery;
}
@Override
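
The same mechanical rewrite repeats in every parser below; before/after in
miniature (lines taken from this diff):

// before: inner queries came back wrapped and empty clauses were dropped (with a deprecation warning)
parseContext.parseInnerQueryBuilder().ifPresent(mustClauses::add);
// after: the builder is returned directly; an empty clause now fails inside parseInnerQueryBuilder
mustClauses.add(parseContext.parseInnerQueryBuilder());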

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* The BoostingQuery class can be used to effectively demote results that match a given query.
@ -137,12 +136,12 @@ public class BoostingQueryBuilder extends AbstractQueryBuilder<BoostingQueryBuil
builder.endObject();
}
public static Optional<BoostingQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static BoostingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
Optional<QueryBuilder> positiveQuery = null;
QueryBuilder positiveQuery = null;
boolean positiveQueryFound = false;
Optional<QueryBuilder> negativeQuery = null;
QueryBuilder negativeQuery = null;
boolean negativeQueryFound = false;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
float negativeBoost = -1;
@ -186,15 +185,12 @@ public class BoostingQueryBuilder extends AbstractQueryBuilder<BoostingQueryBuil
throw new ParsingException(parser.getTokenLocation(),
"[boosting] query requires 'negative_boost' to be set to be a positive value'");
}
if (positiveQuery.isPresent() == false || negativeQuery.isPresent() == false) {
return Optional.empty();
}
BoostingQueryBuilder boostingQuery = new BoostingQueryBuilder(positiveQuery.get(), negativeQuery.get());
BoostingQueryBuilder boostingQuery = new BoostingQueryBuilder(positiveQuery, negativeQuery);
boostingQuery.negativeBoost(negativeBoost);
boostingQuery.boost(boost);
boostingQuery.queryName(queryName);
return Optional.of(boostingQuery);
return boostingQuery;
}
@Override

View File

@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* CommonTermsQuery query is a query that executes high-frequency terms in a
@ -263,7 +262,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
builder.endObject();
}
public static Optional<CommonTermsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static CommonTermsQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -348,7 +347,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
}
}
return Optional.of(new CommonTermsQueryBuilder(fieldName, text)
return new CommonTermsQueryBuilder(fieldName, text)
.lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch)
.highFreqMinimumShouldMatch(highFreqMinimumShouldMatch)
.analyzer(analyzer)
@ -357,7 +356,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
.disableCoord(disableCoord)
.cutoffFrequency(cutoffFrequency)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* A query that wraps a filter and simply returns a constant score equal to the
@ -86,10 +85,10 @@ public class ConstantScoreQueryBuilder extends AbstractQueryBuilder<ConstantScor
builder.endObject();
}
public static Optional<ConstantScoreQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static ConstantScoreQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
Optional<QueryBuilder> query = null;
QueryBuilder query = null;
boolean queryFound = false;
String queryName = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -130,15 +129,10 @@ public class ConstantScoreQueryBuilder extends AbstractQueryBuilder<ConstantScor
throw new ParsingException(parser.getTokenLocation(), "[constant_score] requires a 'filter' element");
}
if (query.isPresent() == false) {
// if inner query is empty, bubble this up to caller so they can decide how to deal with it
return Optional.empty();
}
ConstantScoreQueryBuilder constantScoreBuilder = new ConstantScoreQueryBuilder(query.get());
ConstantScoreQueryBuilder constantScoreBuilder = new ConstantScoreQueryBuilder(query);
constantScoreBuilder.boost(boost);
constantScoreBuilder.queryName(queryName);
return Optional.of(constantScoreBuilder);
return constantScoreBuilder;
}
@Override

View File

@ -34,7 +34,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
/**
* A query that generates the union of documents produced by its sub-queries, and that scores each document
@ -122,7 +121,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
builder.endObject();
}
public static Optional<DisMaxQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static DisMaxQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -140,7 +139,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERIES_FIELD)) {
queriesFound = true;
parseContext.parseInnerQueryBuilder().ifPresent(queries::add);
queries.add(parseContext.parseInnerQueryBuilder());
} else {
throw new ParsingException(parser.getTokenLocation(), "[dis_max] query does not support [" + currentFieldName + "]");
}
@ -148,7 +147,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERIES_FIELD)) {
queriesFound = true;
while (token != XContentParser.Token.END_ARRAY) {
parseContext.parseInnerQueryBuilder().ifPresent(queries::add);
queries.add(parseContext.parseInnerQueryBuilder());
token = parser.nextToken();
}
} else {
@ -178,7 +177,7 @@ public class DisMaxQueryBuilder extends AbstractQueryBuilder<DisMaxQueryBuilder>
for (QueryBuilder query : queries) {
disMaxQuery.add(query);
}
return Optional.of(disMaxQuery);
return disMaxQuery;
}
@Override

View File

@ -37,7 +37,6 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Objects;
import java.util.Optional;
/**
* Constructs a query that only matches documents in which the field has a value.
@ -84,7 +83,7 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder<ExistsQueryBuilder>
builder.endObject();
}
public static Optional<ExistsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static ExistsQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldPattern = null;
@ -120,7 +119,7 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder<ExistsQueryBuilder>
ExistsQueryBuilder builder = new ExistsQueryBuilder(fieldPattern);
builder.queryName(queryName);
builder.boost(boost);
return Optional.of(builder);
return builder;
}
@Override

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder<FieldMaskingSpanQueryBuilder> implements SpanQueryBuilder {
public static final String NAME = "field_masking_span";
@ -101,7 +100,7 @@ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder<FieldMask
builder.endObject();
}
public static Optional<FieldMaskingSpanQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static FieldMaskingSpanQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -117,11 +116,11 @@ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder<FieldMask
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, QUERY_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "[field_masking_span] query must be of type span query");
}
inner = (SpanQueryBuilder) query.get();
inner = (SpanQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(), "[field_masking_span] query does not support ["
+ currentFieldName + "]");
@ -149,7 +148,7 @@ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder<FieldMask
FieldMaskingSpanQueryBuilder queryBuilder = new FieldMaskingSpanQueryBuilder(inner, field);
queryBuilder.boost(boost);
queryBuilder.queryName(queryName);
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -28,8 +28,6 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -39,20 +37,13 @@ import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* A Query that does fuzzy matching for a specific value.
*
* @deprecated Fuzzy queries are not useful enough. This class will be removed with Elasticsearch 4.0. In most cases you may want to use
* a match query with the fuzziness parameter for strings or range queries for numeric and date fields.
*/
@Deprecated
public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> implements MultiTermQueryBuilder {
public static final String NAME = "fuzzy";
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(FuzzyQueryBuilder.class));
/** Default maximum edit distance. Defaults to AUTO. */
public static final Fuzziness DEFAULT_FUZZINESS = Fuzziness.AUTO;
@ -155,7 +146,6 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
* @param value The value of the term
*/
public FuzzyQueryBuilder(String fieldName, Object value) {
DEPRECATION_LOGGER.deprecated("{} query is deprecated. Instead use the [match] query with fuzziness parameter", NAME);
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("field name cannot be null or empty");
}
@ -261,7 +251,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
builder.endObject();
}
public static Optional<FuzzyQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static FuzzyQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
Object value = null;
@ -316,14 +306,14 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
value = parser.objectBytes();
}
}
return Optional.of(new FuzzyQueryBuilder(fieldName, value)
return new FuzzyQueryBuilder(fieldName, value)
.fuzziness(fuzziness)
.prefixLength(prefixLength)
.maxExpansions(maxExpansions)
.transpositions(transpositions)
.rewrite(rewrite)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File

@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Creates a Lucene query that will filter for all documents that lie within the specified
@ -374,7 +373,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
builder.endObject();
}
public static Optional<GeoBoundingBoxQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static GeoBoundingBoxQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -485,7 +484,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
} else {
builder.setValidationMethod(GeoValidationMethod.infer(coerce, ignoreMalformed));
}
return Optional.of(builder);
return builder;
}
@Override

View File

@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
/**
* Filter results of a query to include only those within a specific distance to some
@ -310,7 +309,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
builder.endObject();
}
public static Optional<GeoDistanceQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static GeoDistanceQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token;
@ -425,7 +424,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
qb.boost(boost);
qb.queryName(queryName);
qb.ignoreUnmapped(ignoreUnmapped);
return Optional.of(qb);
return qb;
}
@Override

View File

@ -40,7 +40,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQueryBuilder> {
public static final String NAME = "geo_polygon";
@ -227,7 +226,7 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
builder.endObject();
}
public static Optional<GeoPolygonQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static GeoPolygonQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -308,7 +307,7 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
builder.boost(boost);
}
builder.ignoreUnmapped(ignoreUnmapped);
return Optional.of(builder);
return builder;
}
@Override

View File

@ -47,7 +47,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* {@link QueryBuilder} that builds a GeoShape Query
@ -456,7 +455,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
builder.endObject();
}
public static Optional<GeoShapeQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static GeoShapeQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -562,7 +561,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
}
builder.boost(boost);
builder.ignoreUnmapped(ignoreUnmapped);
return Optional.of(builder);
return builder;
}
@Override

View File

@ -43,7 +43,6 @@ import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
* A query builder for the <tt>has_child</tt> query.
@ -222,7 +221,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
builder.endObject();
}
public static Optional<HasChildQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static HasChildQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String childType = null;
@ -234,7 +233,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
InnerHitBuilder innerHitBuilder = null;
String currentFieldName = null;
XContentParser.Token token;
Optional<QueryBuilder> iqb = Optional.empty();
QueryBuilder iqb = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@ -268,13 +267,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
}
}
}
if (iqb.isPresent() == false) {
// if inner query is empty, bubble this up to caller so they can decide how to deal with it
return Optional.empty();
}
HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(childType, iqb.get(), scoreMode);
HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(childType, iqb, scoreMode);
if (innerHitBuilder != null) {
hasChildQueryBuilder.innerHit(innerHitBuilder);
}
@ -282,7 +275,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
hasChildQueryBuilder.queryName(queryName);
hasChildQueryBuilder.boost(boost);
hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped);
return Optional.of(hasChildQueryBuilder);
return hasChildQueryBuilder;
}
public static ScoreMode parseScoreMode(String scoreModeString) {

View File

@ -38,7 +38,6 @@ import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
/**
@ -223,7 +222,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
builder.endObject();
}
public static Optional<HasParentQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static HasParentQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String parentType = null;
@ -234,7 +233,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
String currentFieldName = null;
XContentParser.Token token;
Optional<QueryBuilder> iqb = Optional.empty();
QueryBuilder iqb = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@ -272,18 +271,14 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
}
}
}
if (iqb.isPresent() == false) {
// if inner query is empty, bubble this up to caller so they can decide how to deal with it
return Optional.empty();
}
HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder(parentType, iqb.get(), score)
HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder(parentType, iqb, score)
.ignoreUnmapped(ignoreUnmapped)
.queryName(queryName)
.boost(boost);
if (innerHits != null) {
queryBuilder.innerHit(innerHits);
}
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -39,7 +39,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
@ -146,9 +145,9 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
declareStandardFields(PARSER);
}
public static Optional<IdsQueryBuilder> fromXContent(QueryParseContext context) {
public static IdsQueryBuilder fromXContent(QueryParseContext context) {
try {
return Optional.of(PARSER.apply(context.parser(), context));
return PARSER.apply(context.parser(), context);
} catch (IllegalArgumentException e) {
throw new ParsingException(context.parser().getTokenLocation(), e.getMessage(), e);
}

View File

@ -64,9 +64,9 @@ public class MatchAllQueryBuilder extends AbstractQueryBuilder<MatchAllQueryBuil
declareStandardFields(PARSER);
}
public static Optional<MatchAllQueryBuilder> fromXContent(QueryParseContext context) {
public static MatchAllQueryBuilder fromXContent(QueryParseContext context) {
try {
return Optional.of(PARSER.apply(context.parser(), context));
return PARSER.apply(context.parser(), context);
} catch (IllegalArgumentException e) {
throw new ParsingException(context.parser().getTokenLocation(), e.getMessage(), e);
}

View File

@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Optional;
/**
* A query that matches no document.
@ -58,7 +57,7 @@ public class MatchNoneQueryBuilder extends AbstractQueryBuilder<MatchNoneQueryBu
builder.endObject();
}
public static Optional<MatchNoneQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MatchNoneQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String currentFieldName = null;
@ -86,7 +85,7 @@ public class MatchNoneQueryBuilder extends AbstractQueryBuilder<MatchNoneQueryBu
MatchNoneQueryBuilder matchNoneQueryBuilder = new MatchNoneQueryBuilder();
matchNoneQueryBuilder.boost(boost);
matchNoneQueryBuilder.queryName(queryName);
return Optional.of(matchNoneQueryBuilder);
return matchNoneQueryBuilder;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.search.MatchQuery;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Match query is a query that analyzes the text and constructs a phrase prefix
@ -190,7 +189,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
return Objects.hash(fieldName, value, analyzer, slop, maxExpansions);
}
public static Optional<MatchPhrasePrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MatchPhrasePrefixQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
Object value = null;
@ -247,6 +246,6 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
matchQuery.maxExpansions(maxExpansion);
matchQuery.queryName(queryName);
matchQuery.boost(boost);
return Optional.of(matchQuery);
return matchQuery;
}
}

View File

@ -31,7 +31,6 @@ import org.elasticsearch.index.search.MatchQuery;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Match query is a query that analyzes the text and constructs a phrase query
@ -162,7 +161,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue
return Objects.hash(fieldName, value, analyzer, slop);
}
public static Optional<MatchPhraseQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MatchPhraseQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
Object value = null;
@ -215,6 +214,6 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder<MatchPhraseQue
matchQuery.slop(slop);
matchQuery.queryName(queryName);
matchQuery.boost(boost);
return Optional.of(matchQuery);
return matchQuery;
}
}

View File

@ -40,7 +40,6 @@ import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
/**
* Match query is a query that analyzes the text and constructs a query as the
@ -524,7 +523,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
return NAME;
}
public static Optional<MatchQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MatchQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
MatchQuery.Type type = MatchQuery.Type.BOOLEAN;
@ -646,7 +645,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
matchQuery.zeroTermsQuery(zeroTermsQuery);
matchQuery.queryName(queryName);
matchQuery.boost(boost);
return Optional.of(matchQuery);
return matchQuery;
}
}

View File

@ -802,7 +802,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
builder.endObject();
}
public static Optional<MoreLikeThisQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MoreLikeThisQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
// document inputs
@ -952,7 +952,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
if (stopWords != null) {
moreLikeThisQueryBuilder.stopWords(stopWords);
}
return Optional.of(moreLikeThisQueryBuilder);
return moreLikeThisQueryBuilder;
}
private static void parseLikeField(QueryParseContext parseContext, List<String> texts, List<Item> items) throws IOException {

View File

@ -556,7 +556,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
builder.endObject();
}
public static Optional<MultiMatchQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static MultiMatchQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
Object value = null;
@ -657,10 +657,10 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
if (fuzziness != null && (type == Type.CROSS_FIELDS || type == Type.PHRASE || type == Type.PHRASE_PREFIX)) {
throw new ParsingException(parser.getTokenLocation(),
"Fuziness not allowed for type [" + type.parseField.getPreferredName() + "]");
"Fuzziness not allowed for type [" + type.parseField.getPreferredName() + "]");
}
return Optional.of(new MultiMatchQueryBuilder(value)
return new MultiMatchQueryBuilder(value)
.fields(fieldsBoosts)
.type(type)
.analyzer(analyzer)
@ -677,7 +677,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
.tieBreaker(tieBreaker)
.zeroTermsQuery(zeroTermsQuery)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
private static void parseFieldAndBoost(XContentParser parser, Map<String, Float> fieldsBoosts) throws IOException {

View File

@ -36,7 +36,6 @@ import org.elasticsearch.index.mapper.ObjectMapper;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder> {
public static final String NAME = "nested";
@ -152,12 +151,12 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
builder.endObject();
}
public static Optional<NestedQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static NestedQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
ScoreMode scoreMode = ScoreMode.Avg;
String queryName = null;
Optional<QueryBuilder> query = Optional.empty();
QueryBuilder query = null;
String path = null;
String currentFieldName = null;
InnerHitBuilder innerHitBuilder = null;
@ -190,19 +189,14 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
}
}
}
if (query.isPresent() == false) {
// if inner query is empty, bubble this up to caller so they can decide how to deal with it
return Optional.empty();
}
NestedQueryBuilder queryBuilder = new NestedQueryBuilder(path, query.get(), scoreMode)
NestedQueryBuilder queryBuilder = new NestedQueryBuilder(path, query, scoreMode)
.ignoreUnmapped(ignoreUnmapped)
.queryName(queryName)
.boost(boost);
if (innerHitBuilder != null) {
queryBuilder.innerHit(innerHitBuilder);
}
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.TypeFieldMapper;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public final class ParentIdQueryBuilder extends AbstractQueryBuilder<ParentIdQueryBuilder> {
public static final String NAME = "parent_id";
@ -116,7 +115,7 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder<ParentIdQue
builder.endObject();
}
public static Optional<ParentIdQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static ParentIdQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String type = null;
@ -150,7 +149,7 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder<ParentIdQue
queryBuilder.queryName(queryName);
queryBuilder.boost(boost);
queryBuilder.ignoreUnmapped(ignoreUnmapped);
return Optional.of(queryBuilder);
return queryBuilder;
}

View File

@ -36,7 +36,6 @@ import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* A Query that matches documents containing terms with a specified prefix.
@ -117,7 +116,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
builder.endObject();
}
public static Optional<PrefixQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static PrefixQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -161,10 +160,10 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
}
}
return Optional.of(new PrefixQueryBuilder(fieldName, value)
return new PrefixQueryBuilder(fieldName, value)
.rewrite(rewrite)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File

@ -197,13 +197,9 @@ public abstract class QueryBuilders {
* @param name The name of the field
* @param value The value of the term
*
* @deprecated Fuzzy queries are not useful enough and will be removed with Elasticsearch 4.0. In most cases you may want to use
* a match query with the fuzziness parameter for strings or range queries for numeric and date fields.
*
* @see #matchQuery(String, Object)
* @see #rangeQuery(String)
*/
@Deprecated
public static FuzzyQueryBuilder fuzzyQuery(String name, String value) {
return new FuzzyQueryBuilder(name, value);
}
@ -214,13 +210,9 @@ public abstract class QueryBuilders {
* @param name The name of the field
* @param value The value of the term
*
* @deprecated Fuzzy queries are not useful enough and will be removed with Elasticsearch 4.0. In most cases you may want to use
* a match query with the fuzziness parameter for strings or range queries for numeric and date fields.
*
* @see #matchQuery(String, Object)
* @see #rangeQuery(String)
*/
@Deprecated
public static FuzzyQueryBuilder fuzzyQuery(String name, Object value) {
return new FuzzyQueryBuilder(name, value);
}

View File

@ -23,20 +23,15 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.Script;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class QueryParseContext implements ParseFieldMatcherSupplier {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(QueryParseContext.class));
private static final ParseField CACHE = new ParseField("_cache").withAllDeprecated("Elasticsearch makes its own caching decisions");
private static final ParseField CACHE_KEY = new ParseField("_cache_key").withAllDeprecated("Filters are always used as cache keys");
@ -75,7 +70,7 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
if (token == XContentParser.Token.FIELD_NAME) {
String fieldName = parser.currentName();
if ("query".equals(fieldName)) {
queryBuilder = parseInnerQueryBuilder().orElse(null);
queryBuilder = parseInnerQueryBuilder();
} else {
throw new ParsingException(parser.getTokenLocation(), "request does not support [" + parser.currentName() + "]");
}
@ -92,20 +87,15 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
/**
* Parses a query excluding the query element that wraps it
*/
public Optional<QueryBuilder> parseInnerQueryBuilder() throws IOException {
public QueryBuilder parseInnerQueryBuilder() throws IOException {
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, must start with start_object");
}
}
if (parser.nextToken() == XContentParser.Token.END_OBJECT) {
// we encountered '{}' for a query clause
String msg = "query malformed, empty clause found at [" + parser.getTokenLocation() +"]";
DEPRECATION_LOGGER.deprecated(msg);
if (parseFieldMatcher.isStrict()) {
throw new IllegalArgumentException(msg);
}
return Optional.empty();
// we encountered '{}' for a query clause; this used to be supported, was deprecated in 5.0 and removed in 6.0
throw new IllegalArgumentException("query malformed, empty clause found at [" + parser.getTokenLocation() +"]");
}
if (parser.currentToken() != XContentParser.Token.FIELD_NAME) {
throw new ParsingException(parser.getTokenLocation(), "[_na] query malformed, no field after start_object");
@ -115,9 +105,7 @@ public class QueryParseContext implements ParseFieldMatcherSupplier {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "[" + queryName + "] query malformed, no start_object after query name");
}
@SuppressWarnings("unchecked")
Optional<QueryBuilder> result = (Optional<QueryBuilder>) indicesQueriesRegistry.lookup(queryName, parseFieldMatcher,
parser.getTokenLocation()).fromXContent(this);
QueryBuilder result = indicesQueriesRegistry.lookup(queryName, parseFieldMatcher, parser.getTokenLocation()).fromXContent(this);
//end_object of the specific query (e.g. match, multi_match etc.) element
if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
throw new ParsingException(parser.getTokenLocation(),
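
This hunk is the behavioral core of the change: parseInnerQueryBuilder() now
returns a bare QueryBuilder, and an empty {} clause throws instead of
producing Optional.empty(). A hedged, test-style sketch of the caller-side
effect (parser state assumed; message quoted from the code above):

try {
    parseContext.parseInnerQueryBuilder(); // parser positioned on an empty "{}" clause
} catch (IllegalArgumentException e) {
    // "query malformed, empty clause found at [...]"
}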

View File

@ -20,7 +20,6 @@
package org.elasticsearch.index.query;
import java.io.IOException;
import java.util.Optional;
/**
* Defines a query parser that is able to parse {@link QueryBuilder}s from {@link org.elasticsearch.common.xcontent.XContent}.
@ -37,5 +36,5 @@ public interface QueryParser<QB extends QueryBuilder> {
* call
* @return the new QueryBuilder
*/
Optional<QB> fromXContent(QueryParseContext parseContext) throws IOException;
QB fromXContent(QueryParseContext parseContext) throws IOException;
}

View File

@ -57,7 +57,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
@ -685,7 +684,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
builder.endObject();
}
public static Optional<QueryStringQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static QueryStringQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String currentFieldName = null;
XContentParser.Token token;
@ -855,7 +854,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
queryStringQuery.queryName(queryName);
queryStringQuery.splitOnWhitespace(splitOnWhitespace);
queryStringQuery.useAllFields(useAllFields);
return Optional.of(queryStringQuery);
return queryStringQuery;
}
@Override

View File

@ -44,7 +44,6 @@ import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* A Query that matches documents within a range of terms.
@ -344,7 +343,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
builder.endObject();
}
public static Optional<RangeQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static RangeQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -435,7 +434,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
if (relation != null) {
rangeQuery.relation(relation);
}
return Optional.of(rangeQuery);
return rangeQuery;
}
@Override

View File

@ -37,7 +37,6 @@ import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* A Query that does regexp matching for a specific value.
@ -178,7 +177,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
builder.endObject();
}
public static Optional<RegexpQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static RegexpQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
String rewrite = null;
@ -233,12 +232,12 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
}
}
return Optional.of(new RegexpQueryBuilder(fieldName, value)
return new RegexpQueryBuilder(fieldName, value)
.flags(flagsValue)
.maxDeterminizedStates(maxDeterminizedStates)
.rewrite(rewrite)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File

@ -38,7 +38,6 @@ import org.elasticsearch.script.SearchScript;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> {
public static final String NAME = "script";
@ -84,7 +83,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
builder.endObject();
}
public static Optional<ScriptQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static ScriptQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
// also, when caching, since its isCacheable is false, this will result in loading the whole bit set...
Script script = null;
@ -122,9 +121,9 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
throw new ParsingException(parser.getTokenLocation(), "script must be provided with a [script] filter");
}
return Optional.of(new ScriptQueryBuilder(script)
return new ScriptQueryBuilder(script)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File

@ -40,7 +40,6 @@ import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.TreeMap;
/**
@ -471,7 +470,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
builder.endObject();
}
public static Optional<SimpleQueryStringBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SimpleQueryStringBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String currentFieldName = null;
@ -580,7 +579,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
}
qb.analyzeWildcard(analyzeWildcard).boost(boost).quoteFieldSuffix(quoteFieldSuffix);
qb.useAllFields(useAllFields);
return Optional.of(qb);
return qb;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Builder for {@link org.apache.lucene.search.spans.SpanContainingQuery}.
@ -100,7 +99,7 @@ public class SpanContainingQueryBuilder extends AbstractQueryBuilder<SpanContain
builder.endObject();
}
public static Optional<SpanContainingQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanContainingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
@ -114,17 +113,17 @@ public class SpanContainingQueryBuilder extends AbstractQueryBuilder<SpanContain
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, BIG_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "span_containing [big] must be of type span query");
}
big = (SpanQueryBuilder) query.get();
big = (SpanQueryBuilder) query;
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LITTLE_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "span_containing [little] must be of type span query");
}
little = (SpanQueryBuilder) query.get();
little = (SpanQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(),
"[span_containing] query does not support [" + currentFieldName + "]");
@ -141,7 +140,7 @@ public class SpanContainingQueryBuilder extends AbstractQueryBuilder<SpanContain
SpanContainingQueryBuilder query = new SpanContainingQueryBuilder(big, little);
query.boost(boost).queryName(queryName);
return Optional.of(query);
return query;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class SpanFirstQueryBuilder extends AbstractQueryBuilder<SpanFirstQueryBuilder> implements SpanQueryBuilder {
public static final String NAME = "span_first";
@ -100,7 +99,7 @@ public class SpanFirstQueryBuilder extends AbstractQueryBuilder<SpanFirstQueryBu
builder.endObject();
}
public static Optional<SpanFirstQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanFirstQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -116,11 +115,11 @@ public class SpanFirstQueryBuilder extends AbstractQueryBuilder<SpanFirstQueryBu
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MATCH_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "spanFirst [match] must be of type span query");
}
match = (SpanQueryBuilder) query.get();
match = (SpanQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_first] query does not support [" + currentFieldName + "]");
}
@ -144,7 +143,7 @@ public class SpanFirstQueryBuilder extends AbstractQueryBuilder<SpanFirstQueryBu
}
SpanFirstQueryBuilder queryBuilder = new SpanFirstQueryBuilder(match, end);
queryBuilder.boost(boost).queryName(queryName);
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -33,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
Query that allows wrapping a {@link MultiTermQueryBuilder} (one of wildcard, fuzzy, prefix, term, range or regexp query)
@ -82,7 +81,7 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder<SpanMultiTer
builder.endObject();
}
public static Optional<SpanMultiTermQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanMultiTermQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String currentFieldName = null;
MultiTermQueryBuilder subQuery = null;
@ -94,12 +93,12 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder<SpanMultiTer
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, MATCH_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof MultiTermQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof MultiTermQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(),
"[span_multi] [" + MATCH_FIELD.getPreferredName() + "] must be of type multi term query");
}
subQuery = (MultiTermQueryBuilder) query.get();
subQuery = (MultiTermQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_multi] query does not support [" + currentFieldName + "]");
}
@ -119,7 +118,7 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder<SpanMultiTer
"[span_multi] must have [" + MATCH_FIELD.getPreferredName() + "] multi term query clause");
}
return Optional.of(new SpanMultiTermQueryBuilder(subQuery).queryName(queryName).boost(boost));
return new SpanMultiTermQueryBuilder(subQuery).queryName(queryName).boost(boost);
}
@Override

View File

@ -34,7 +34,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
/**
* Matches spans which are near one another. One can specify slop, the maximum number
@ -144,7 +143,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
builder.endObject();
}
public static Optional<SpanNearQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanNearQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -162,11 +161,11 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
} else if (token == XContentParser.Token.START_ARRAY) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, CLAUSES_FIELD)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "spanNear [clauses] must be of type span query");
}
clauses.add((SpanQueryBuilder) query.get());
clauses.add((SpanQueryBuilder) query);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_near] query does not support [" + currentFieldName + "]");
@ -203,7 +202,7 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
queryBuilder.inOrder(inOrder);
queryBuilder.boost(boost);
queryBuilder.queryName(queryName);
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class SpanNotQueryBuilder extends AbstractQueryBuilder<SpanNotQueryBuilder> implements SpanQueryBuilder {
public static final String NAME = "span_not";
@ -161,7 +160,7 @@ public class SpanNotQueryBuilder extends AbstractQueryBuilder<SpanNotQueryBuilde
builder.endObject();
}
public static Optional<SpanNotQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanNotQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -182,17 +181,17 @@ public class SpanNotQueryBuilder extends AbstractQueryBuilder<SpanNotQueryBuilde
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, INCLUDE_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "spanNot [include] must be of type span query");
}
include = (SpanQueryBuilder) query.get();
include = (SpanQueryBuilder) query;
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, EXCLUDE_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "spanNot [exclude] must be of type span query");
}
exclude = (SpanQueryBuilder) query.get();
exclude = (SpanQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_not] query does not support [" + currentFieldName + "]");
}
@ -234,7 +233,7 @@ public class SpanNotQueryBuilder extends AbstractQueryBuilder<SpanNotQueryBuilde
}
spanNotQuery.boost(boost);
spanNotQuery.queryName(queryName);
return Optional.of(spanNotQuery);
return spanNotQuery;
}
@Override

View File

@ -34,7 +34,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
/**
* Span query that matches the union of its clauses. Maps to {@link SpanOrQuery}.
@ -98,7 +97,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
builder.endObject();
}
public static Optional<SpanOrQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanOrQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -114,11 +113,11 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
} else if (token == XContentParser.Token.START_ARRAY) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, CLAUSES_FIELD)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "spanOr [clauses] must be of type span query");
}
clauses.add((SpanQueryBuilder) query.get());
clauses.add((SpanQueryBuilder) query);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]");
@ -144,7 +143,7 @@ public class SpanOrQueryBuilder extends AbstractQueryBuilder<SpanOrQueryBuilder>
}
queryBuilder.boost(boost);
queryBuilder.queryName(queryName);
return Optional.of(queryBuilder);
return queryBuilder;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Optional;
/**
* A Span Query that matches documents containing a term.
@ -92,7 +91,7 @@ public class SpanTermQueryBuilder extends BaseTermQueryBuilder<SpanTermQueryBuil
return new SpanTermQuery(term);
}
public static Optional<SpanTermQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
public static SpanTermQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException, ParsingException {
XContentParser parser = parseContext.parser();
String fieldName = null;
Object value = null;
@ -133,7 +132,7 @@ public class SpanTermQueryBuilder extends BaseTermQueryBuilder<SpanTermQueryBuil
SpanTermQueryBuilder result = new SpanTermQueryBuilder(fieldName, value);
result.boost(boost).queryName(queryName);
return Optional.of(result);
return result;
}
@Override

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Builder for {@link org.apache.lucene.search.spans.SpanWithinQuery}.
@ -105,7 +104,7 @@ public class SpanWithinQueryBuilder extends AbstractQueryBuilder<SpanWithinQuery
builder.endObject();
}
public static Optional<SpanWithinQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static SpanWithinQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
@ -120,17 +119,17 @@ public class SpanWithinQueryBuilder extends AbstractQueryBuilder<SpanWithinQuery
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, BIG_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "span_within [big] must be of type span query");
}
big = (SpanQueryBuilder) query.get();
big = (SpanQueryBuilder) query;
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, LITTLE_FIELD)) {
Optional<QueryBuilder> query = parseContext.parseInnerQueryBuilder();
if (query.isPresent() == false || query.get() instanceof SpanQueryBuilder == false) {
QueryBuilder query = parseContext.parseInnerQueryBuilder();
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "span_within [little] must be of type span query");
}
little = (SpanQueryBuilder) query.get();
little = (SpanQueryBuilder) query;
} else {
throw new ParsingException(parser.getTokenLocation(),
"[span_within] query does not support [" + currentFieldName + "]");
@ -153,7 +152,7 @@ public class SpanWithinQueryBuilder extends AbstractQueryBuilder<SpanWithinQuery
SpanWithinQueryBuilder query = new SpanWithinQueryBuilder(big, little);
query.boost(boost).queryName(queryName);
return Optional.of(query);
return query;
}
@Override

View File

@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
import java.util.Optional;
/**
* A Query that matches documents containing a term.
@ -83,7 +82,7 @@ public class TermQueryBuilder extends BaseTermQueryBuilder<TermQueryBuilder> {
super(in);
}
public static Optional<TermQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static TermQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String queryName = null;
@ -132,7 +131,7 @@ public class TermQueryBuilder extends BaseTermQueryBuilder<TermQueryBuilder> {
if (queryName != null) {
termQuery.queryName(queryName);
}
return Optional.of(termQuery);
return termQuery;
}
@Override

View File

@ -48,7 +48,6 @@ import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@ -321,7 +320,7 @@ public class TermsQueryBuilder extends AbstractQueryBuilder<TermsQueryBuilder> {
builder.endObject();
}
public static Optional<TermsQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static TermsQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
@ -372,9 +371,9 @@ public class TermsQueryBuilder extends AbstractQueryBuilder<TermsQueryBuilder> {
throw new ParsingException(parser.getTokenLocation(), "[" + TermsQueryBuilder.NAME + "] query requires a field name, " +
"followed by array of terms or a document lookup specification");
}
return Optional.of(new TermsQueryBuilder(fieldName, values, termsLookup)
return new TermsQueryBuilder(fieldName, values, termsLookup)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
private static List<Object> parseValues(XContentParser parser) throws IOException {

View File

@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
public static final String NAME = "type";
@ -81,7 +80,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
builder.endObject();
}
public static Optional<TypeQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static TypeQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
BytesRef type = null;
@ -114,9 +113,9 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
throw new ParsingException(parser.getTokenLocation(),
"[" + TypeQueryBuilder.NAME + "] filter needs to be provided with a value for the type");
}
return Optional.of(new TypeQueryBuilder(type)
return new TypeQueryBuilder(type)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}

View File

@ -36,7 +36,6 @@ import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.Objects;
import java.util.Optional;
/**
* Implements the wildcard search query. Supported wildcards are <tt>*</tt>, which
@ -133,7 +132,7 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder<WildcardQueryBuil
builder.endObject();
}
public static Optional<WildcardQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static WildcardQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
String fieldName = null;
String rewrite = null;
@ -177,10 +176,10 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder<WildcardQueryBuil
}
}
return Optional.of(new WildcardQueryBuilder(fieldName, value)
return new WildcardQueryBuilder(fieldName, value)
.rewrite(rewrite)
.boost(boost)
.queryName(queryName));
.queryName(queryName);
}
@Override

View File
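
For orientation, a hedged usage sketch of the builder whose parser changed above; the field name, pattern, and rewrite method are invented for illustration:

// '*' matches any character sequence (including the empty one), '?' a single character.
WildcardQueryBuilder wildcard = new WildcardQueryBuilder("user.name", "ki*y")
        .rewrite("constant_score") // hypothetical rewrite choice
        .boost(2.0f);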

@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Optional;
/**
A Query builder which allows building a query from a JSON string or binary data provided as input. This is useful when you want
@ -116,7 +115,7 @@ public class WrapperQueryBuilder extends AbstractQueryBuilder<WrapperQueryBuilde
builder.endObject();
}
public static Optional<WrapperQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
public static WrapperQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token = parser.nextToken();
@ -136,7 +135,7 @@ public class WrapperQueryBuilder extends AbstractQueryBuilder<WrapperQueryBuilde
if (source == null) {
throw new ParsingException(parser.getTokenLocation(), "wrapper query has no [query] specified");
}
return Optional.of(new WrapperQueryBuilder(source));
return new WrapperQueryBuilder(source);
}
@Override
@ -164,8 +163,7 @@ public class WrapperQueryBuilder extends AbstractQueryBuilder<WrapperQueryBuilde
try (XContentParser qSourceParser = XContentFactory.xContent(source).createParser(source)) {
QueryParseContext parseContext = context.newParseContext(qSourceParser);
final QueryBuilder queryBuilder = parseContext.parseInnerQueryBuilder().orElseThrow(
() -> new ParsingException(qSourceParser.getTokenLocation(), "inner query cannot be empty"));
final QueryBuilder queryBuilder = parseContext.parseInnerQueryBuilder();
if (boost() != DEFAULT_BOOST || queryName() != null) {
final BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.must(queryBuilder);

View File
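
For context, a hedged usage sketch of the wrapper query whose rewrite path changed above; the JSON payload is invented:

// The raw source is held as bytes and only parsed into a concrete query when the
// wrapper is rewritten against a parse context, as shown in the hunk above.
WrapperQueryBuilder wrapper =
        new WrapperQueryBuilder("{\"term\" : {\"user\" : \"kimchy\"}}");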

@ -40,7 +40,6 @@ import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
@ -52,7 +51,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
/**
A query that uses filters with a script associated with them to compute the
@ -437,7 +435,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
InnerHitBuilder.extractInnerHits(query(), innerHits);
}
public static Optional<FunctionScoreQueryBuilder> fromXContent(ParseFieldRegistry<ScoreFunctionParser<?>> scoreFunctionsRegistry,
public static FunctionScoreQueryBuilder fromXContent(ParseFieldRegistry<ScoreFunctionParser<?>> scoreFunctionsRegistry,
QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
@ -467,7 +465,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
throw new ParsingException(parser.getTokenLocation(), "failed to parse [{}] query. [query] is already defined.",
NAME);
}
query = parseContext.parseInnerQueryBuilder().orElse(QueryBuilders.matchAllQuery());
query = parseContext.parseInnerQueryBuilder();
} else {
if (singleFunctionFound) {
throw new ParsingException(parser.getTokenLocation(),
@ -555,7 +553,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
}
functionScoreQueryBuilder.boost(boost);
functionScoreQueryBuilder.queryName(queryName);
return Optional.of(functionScoreQueryBuilder);
return functionScoreQueryBuilder;
}
private static void handleMisplacedFunctionsDeclaration(XContentLocation contentLocation, String errorString) {
@ -583,7 +581,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, FILTER_FIELD)) {
filter = parseContext.parseInnerQueryBuilder().orElse(QueryBuilders.matchAllQuery());
filter = parseContext.parseInnerQueryBuilder();
} else {
if (scoreFunction != null) {
throw new ParsingException(parser.getTokenLocation(),

View File

@ -510,7 +510,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
boolean isRetry) {
try {
verifyPrimary();
return prepareIndex(docMapper(source.type()), source, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, versionType,
return prepareIndex(docMapper(source.type()), source, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType,
Engine.Operation.Origin.PRIMARY, autoGeneratedIdTimestamp, isRetry);
} catch (Exception e) {
verifyNotClosed(e);
@ -522,16 +522,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
boolean isRetry) {
try {
verifyReplicationTarget();
return prepareIndex(docMapper(source.type()), source, seqNo, version, versionType, Engine.Operation.Origin.REPLICA, autoGeneratedIdTimestamp,
isRetry);
return prepareIndex(docMapper(source.type()), source, seqNo, primaryTerm, version, versionType,
Engine.Operation.Origin.REPLICA, autoGeneratedIdTimestamp, isRetry);
} catch (Exception e) {
verifyNotClosed(e);
throw e;
}
}
static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long seqNo, long version, VersionType versionType,
Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry) {
static Engine.Index prepareIndex(DocumentMapperForType docMapper, SourceToParse source, long seqNo, long primaryTerm, long version,
VersionType versionType, Engine.Operation.Origin origin, long autoGeneratedIdTimestamp,
boolean isRetry) {
long startTime = System.nanoTime();
ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
if (docMapper.getMapping() != null) {
@ -540,8 +541,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
MappedFieldType uidFieldType = docMapper.getDocumentMapper().uidMapper().fieldType();
Query uidQuery = uidFieldType.termQuery(doc.uid(), null);
Term uid = MappedFieldType.extractTerm(uidQuery);
doc.seqNo().setLongValue(seqNo);
return new Engine.Index(uid, doc, seqNo, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
}
public Engine.IndexResult index(Engine.Index index) {
@ -573,21 +573,24 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType();
final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null);
final Term uid = MappedFieldType.extractTerm(uidQuery);
return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, versionType, Engine.Operation.Origin.PRIMARY);
return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, primaryTerm, version,
versionType, Engine.Operation.Origin.PRIMARY);
}
public Engine.Delete prepareDeleteOnReplica(String type, String id, long seqNo, long version, VersionType versionType) {
public Engine.Delete prepareDeleteOnReplica(String type, String id, long seqNo, long primaryTerm,
long version, VersionType versionType) {
verifyReplicationTarget();
final DocumentMapper documentMapper = docMapper(type).getDocumentMapper();
final MappedFieldType uidFieldType = documentMapper.uidMapper().fieldType();
final Query uidQuery = uidFieldType.termQuery(Uid.createUid(type, id), null);
final Term uid = MappedFieldType.extractTerm(uidQuery);
return prepareDelete(type, id, uid, seqNo, version, versionType, Engine.Operation.Origin.REPLICA);
return prepareDelete(type, id, uid, seqNo, primaryTerm, version, versionType, Engine.Operation.Origin.REPLICA);
}
static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long version, VersionType versionType, Engine.Operation.Origin origin) {
static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version,
VersionType versionType, Engine.Operation.Origin origin) {
long startTime = System.nanoTime();
return new Engine.Delete(type, id, uid, seqNo, version, versionType, origin, startTime);
return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime);
}
public Engine.DeleteResult delete(Engine.Delete delete) {
@ -756,7 +759,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyStartedOrRecovering();
verifyNotClosed();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
Engine engine = getEngine();
if (engine.isRecovering()) {
@ -776,7 +779,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
// while recovering, and we want to keep the translog at bay (up to deletes, which
// we don't gc). Yet, we don't use flush internally to clear deletes and flush the indexwriter since
// we use #writeIndexingBuffer for this now.
verifyStartedOrRecovering();
verifyNotClosed();
Engine engine = getEngine();
if (engine.isRecovering()) {
throw new IllegalIndexShardStateException(shardId(), state, "flush is only allowed if the engine is not recovery" +
@ -790,7 +793,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
verifyStarted();
verifyActive();
if (logger.isTraceEnabled()) {
logger.trace("force merge with {}", forceMerge);
}
@ -802,7 +805,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* Upgrades the shard to the current version of Lucene and returns the minimum segment version
*/
public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) throws IOException {
verifyStarted();
verifyActive();
if (logger.isTraceEnabled()) {
logger.trace("upgrade with {}", upgrade);
}
@ -1145,13 +1148,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
}
protected final void verifyStartedOrRecovering() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering");
}
}
private void verifyNotClosed() throws IllegalIndexShardStateException {
verifyNotClosed(null);
}
@ -1167,10 +1163,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
}
protected final void verifyStarted() throws IllegalIndexShardStateException {
protected final void verifyActive() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard is active");
}
}

View File
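
All of the prepare-index/delete entry points above now thread the primary term through next to the sequence number. A hedged sketch of the replica-side delete using the new signature (values are illustrative):

long seqNo = 42;        // assigned by the primary
long primaryTerm = 1;   // term under which the primary issued the operation
// Both values are received from the primary and passed through unchanged;
// nothing is re-derived on the replica anymore.
Engine.Delete delete = indexShard.prepareDeleteOnReplica(
        "my_type", "doc-1", seqNo, primaryTerm, 3 /* version */, VersionType.INTERNAL);
Engine.DeleteResult result = indexShard.delete(delete);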

@ -155,8 +155,8 @@ public class TranslogRecoveryPerformer {
// we set canHaveDuplicates to true all the time such that we de-optimize the translog case and ensure that all
// autoGeneratedID docs that are coming from the primary are updated correctly.
Engine.Index engineIndex = IndexShard.prepareIndex(docMapper(index.type()), source(shardId.getIndexName(), index.type(), index.id(), index.source())
.routing(index.routing()).parent(index.parent()), index.seqNo(),
index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
.routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(),
index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true);
maybeAddMappingUpdate(engineIndex.type(), engineIndex.parsedDoc().dynamicMappingsUpdate(), engineIndex.id(), allowMappingUpdates);
if (logger.isTraceEnabled()) {
logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id());
@ -170,7 +170,8 @@ public class TranslogRecoveryPerformer {
logger.trace("[translog] recover [delete] op of [{}][{}]", uid.type(), uid.id());
}
final Engine.Delete engineDelete = new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.seqNo(),
delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(), origin, System.nanoTime());
delete.primaryTerm(), delete.version(), delete.versionType().versionTypeForReplicationAndRecovery(),
origin, System.nanoTime());
delete(engine, engineDelete);
break;
default:

View File

@ -825,6 +825,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final long autoGeneratedIdTimestamp;
private final String type;
private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
private long primaryTerm = 0;
private final long version;
private final VersionType versionType;
private final BytesReference source;
@ -853,6 +854,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
if (format >= FORMAT_SEQ_NO) {
seqNo = in.readVLong();
primaryTerm = in.readVLong();
}
}
@ -863,6 +865,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
this.routing = index.routing();
this.parent = index.parent();
this.seqNo = indexResult.getSeqNo();
this.primaryTerm = index.primaryTerm();
this.version = indexResult.getVersion();
this.versionType = index.versionType();
this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp();
@ -914,6 +917,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return seqNo;
}
public long primaryTerm() {
return primaryTerm;
}
public long version() {
return this.version;
}
@ -940,6 +947,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
out.writeByte(versionType.getValue());
out.writeLong(autoGeneratedIdTimestamp);
out.writeVLong(seqNo);
out.writeVLong(primaryTerm);
}
@Override
@ -955,6 +963,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (version != index.version ||
seqNo != index.seqNo ||
primaryTerm != index.primaryTerm ||
id.equals(index.id) == false ||
type.equals(index.type) == false ||
versionType != index.versionType ||
@ -974,6 +983,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
int result = id.hashCode();
result = 31 * result + type.hashCode();
result = 31 * result + Long.hashCode(seqNo);
result = 31 * result + Long.hashCode(primaryTerm);
result = 31 * result + Long.hashCode(version);
result = 31 * result + versionType.hashCode();
result = 31 * result + source.hashCode();
@ -1003,6 +1013,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private Term uid;
private long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
private long primaryTerm = 0;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
@ -1015,21 +1026,23 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
assert versionType.validateVersionForWrites(this.version);
if (format >= FORMAT_SEQ_NO) {
seqNo = in.readVLong();
primaryTerm = in.readVLong();
}
}
public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) {
this(delete.uid(), deleteResult.getSeqNo(), deleteResult.getVersion(), delete.versionType());
this(delete.uid(), deleteResult.getSeqNo(), delete.primaryTerm(), deleteResult.getVersion(), delete.versionType());
}
/** utility for testing */
public Delete(Term uid) {
this(uid, 0, Versions.MATCH_ANY, VersionType.INTERNAL);
this(uid, 0, 0, Versions.MATCH_ANY, VersionType.INTERNAL);
}
public Delete(Term uid, long seqNo, long version, VersionType versionType) {
public Delete(Term uid, long seqNo, long primaryTerm, long version, VersionType versionType) {
this.uid = uid;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.version = version;
this.versionType = versionType;
}
@ -1052,6 +1065,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return seqNo;
}
public long primaryTerm() {
return primaryTerm;
}
public long version() {
return this.version;
}
@ -1073,6 +1090,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeVLong(seqNo);
out.writeVLong(primaryTerm);
}
@Override
@ -1086,7 +1104,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
Delete delete = (Delete) o;
return version == delete.version && seqNo == delete.seqNo &&
return version == delete.version &&
seqNo == delete.seqNo &&
primaryTerm == delete.primaryTerm &&
uid.equals(delete.uid) &&
versionType == delete.versionType;
}
@ -1095,6 +1115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public int hashCode() {
int result = uid.hashCode();
result = 31 * result + Long.hashCode(seqNo);
result = 31 * result + Long.hashCode(primaryTerm);
result = 31 * result + Long.hashCode(version);
result = 31 * result + versionType.hashCode();
return result;

View File
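
The new field is written immediately after seqNo and gated on the same format version, so translogs written before FORMAT_SEQ_NO still deserialize with the declared defaults. A condensed sketch of the symmetric pair spread across the hunks above:

// Write side: primaryTerm always rides along with seqNo.
out.writeVLong(seqNo);
out.writeVLong(primaryTerm);

// Read side: older formats skip both reads, leaving seqNo = UNASSIGNED_SEQ_NO
// and primaryTerm = 0 as initialized on the fields.
if (format >= FORMAT_SEQ_NO) {
    seqNo = in.readVLong();
    primaryTerm = in.readVLong();
}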

@ -45,13 +45,13 @@ import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RangeFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.mapper.internal.SeqNoFieldMapper;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;

View File

@ -25,15 +25,14 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Map;
import java.util.function.BiFunction;
/**
* Encapsulates a pipeline's id and configuration as a blob
@ -55,7 +54,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
}, new ParseField("config"), ObjectParser.ValueType.OBJECT);
}
public static BiFunction<XContentParser, ParseFieldMatcherSupplier, PipelineConfiguration> getParser() {
public static ContextParser<ParseFieldMatcherSupplier, PipelineConfiguration> getParser() {
return (p, c) -> PARSER.apply(p, c).build();
}
private static class Builder {

View File
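
ContextParser has the same (parser, context) -> value shape as the lambda already being returned, which is why the body of getParser() is unchanged. A hedged caller sketch, assuming ContextParser's functional method is named parse and that xContentParser and matcherSupplier exist at the call site:

ContextParser<ParseFieldMatcherSupplier, PipelineConfiguration> parser =
        PipelineConfiguration.getParser();
// parse(...) may throw IOException; handling is omitted in this sketch.
PipelineConfiguration config = parser.parse(xContentParser, matcherSupplier);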

@ -32,6 +32,7 @@ import org.elasticsearch.action.search.SearchPhaseController;
import org.elasticsearch.action.search.SearchTransportService;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.ClusterInfoService;
@ -456,7 +457,6 @@ public class Node implements Closeable {
.map(injector::getInstance).collect(Collectors.toList()));
resourcesToClose.addAll(pluginLifecycleComponents);
this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents);
client.initialize(injector.getInstance(new Key<Map<GenericAction, TransportAction>>() {}));
logger.info("initialized");
@ -568,8 +568,8 @@ public class Node implements Closeable {
TransportService transportService = injector.getInstance(TransportService.class);
transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class));
transportService.start();
validateNodeBeforeAcceptingRequests(settings, transportService.boundAddress());
validateNodeBeforeAcceptingRequests(settings, transportService.boundAddress(), pluginsService.filterPlugins(Plugin.class).stream()
.flatMap(p -> p.getBootstrapChecks().stream()).collect(Collectors.toList()));
DiscoveryNode localNode = DiscoveryNode.createLocal(settings,
transportService.boundAddress().publishAddress(), injector.getInstance(NodeEnvironment.class).nodeId());
@ -790,7 +790,7 @@ public class Node implements Closeable {
@SuppressWarnings("unused")
protected void validateNodeBeforeAcceptingRequests(
final Settings settings,
final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> bootstrapChecks) throws NodeValidationException {
}
/** Writes a file to the logs dir containing the ports for the given transport type */

View File

@ -22,12 +22,13 @@ package org.elasticsearch.node;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import java.util.List;
/**
* An exception thrown during node validation. Node validation runs immediately before a node
* begins accepting network requests in
* {@link Node#validateNodeBeforeAcceptingRequests(Settings, BoundTransportAddress)}. This
* exception is a checked exception that is declared as thrown from this method for the purpose
* of bubbling up to the user.
* {@link Node#validateNodeBeforeAcceptingRequests(Settings, BoundTransportAddress, List)}. This exception is a checked exception that
* is declared as thrown from this method for the purpose of bubbling up to the user.
*/
public class NodeValidationException extends Exception {

View File

@ -19,9 +19,6 @@
package org.elasticsearch.node.service;
import java.io.Closeable;
import java.io.IOException;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
@ -44,6 +41,9 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
import java.io.IOException;
public class NodeService extends AbstractComponent implements Closeable {
private final ThreadPool threadPool;
@ -111,7 +111,7 @@ public class NodeService extends AbstractComponent implements Closeable {
threadPool ? this.threadPool.stats() : null,
fs ? monitorService.fsService().stats() : null,
transport ? transportService.stats() : null,
http ? httpServer.stats() : null,
http ? (httpServer == null ? null : httpServer.stats()) : null,
circuitBreaker ? circuitBreakerService.stats() : null,
script ? scriptService.stats() : null,
discoveryStats ? discovery.stats() : null,
@ -127,4 +127,5 @@ public class NodeService extends AbstractComponent implements Closeable {
public void close() throws IOException {
indicesService.close();
}
}

View File

@ -26,6 +26,7 @@ import java.util.Collections;
import java.util.List;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.metadata.MetaData;
@ -164,6 +165,14 @@ public abstract class Plugin implements Closeable {
return Collections.emptyList();
}
/**
* Returns a list of checks that are enforced on node startup, once the node has bound the transport protocol to a non-loopback
* interface. In that case we assume the node is running in production, and all bootstrap checks must pass. This allows plugins
* to provide a better out-of-the-box experience by pre-configuring otherwise (in production) mandatory settings, or to enforce
* certain configurations such as OS settings or third-party resources.
*/
public List<BootstrapCheck> getBootstrapChecks() { return Collections.emptyList(); }
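
As a hedged illustration of the new hook (the plugin and its invariant are invented, and this assumes BootstrapCheck exposes check()/errorMessage() with check() returning true on failure):

public class MyPlugin extends Plugin {
    @Override
    public List<BootstrapCheck> getBootstrapChecks() {
        return Collections.singletonList(new BootstrapCheck() {
            @Override
            public boolean check() {
                // Fail startup when the heap is implausibly small for this plugin.
                return Runtime.getRuntime().maxMemory() < (1L << 30);
            }

            @Override
            public String errorMessage() {
                return "my-plugin requires at least 1 GiB of heap";
            }
        });
    }
}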
/**
* Close the resources opened by this plugin.
*

Some files were not shown because too many files have changed in this diff.