Merge branch 'master' into feature/multi_cluster_search

Simon Willnauer 2017-01-17 12:34:36 +01:00
commit 709cb9a39e
113 changed files with 1880 additions and 1476 deletions

View File

@@ -25,12 +25,6 @@ run it using Gradle:
 gradle run
 -------------------------------------
-or to attach a remote debugger, run it as:
--------------------------------------
-gradle run --debug-jvm
--------------------------------------
 === Test case filtering.
 - `tests.class` is a class-filtering shell-like glob pattern,
@@ -480,7 +474,7 @@ Combined (Unit+Integration) coverage:
 mvn -Dtests.coverage verify jacoco:report
 ---------------------------------------------------------------------------
-== Debugging from an IDE
+== Launching and debugging from an IDE
 If you want to run elasticsearch from your IDE, the `gradle run` task
 supports a remote debugging option:
@@ -489,6 +483,17 @@ supports a remote debugging option:
 gradle run --debug-jvm
 ---------------------------------------------------------------------------
+== Debugging remotely from an IDE
+If you want to run Elasticsearch and be able to remotely attach the process
+for debugging purposes from your IDE, you can start Elasticsearch using `ES_JAVA_OPTS`:
+---------------------------------------------------------------------------
+ES_JAVA_OPTS="-Xdebug -Xrunjdwp:server=y,transport=dt_socket,address=4000,suspend=y" ./bin/elasticsearch
+---------------------------------------------------------------------------
+Read your IDE documentation for how to attach a debugger to a JVM process.
 == Building with extra plugins
 Additional plugins may be built alongside elasticsearch, where their
 dependency on elasticsearch will be substituted with the local elasticsearch
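Note: the `-Xdebug -Xrunjdwp` flags in the added section are the legacy JDWP syntax; on current JVMs an equivalent command (an aside, not part of this commit) would be:
---------------------------------------------------------------------------
ES_JAVA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=4000" ./bin/elasticsearch
---------------------------------------------------------------------------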

View File

@@ -430,7 +430,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />

View File

@@ -49,6 +49,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -282,15 +283,44 @@ public class RestClient implements Closeable {
     public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                     HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                     ResponseListener responseListener, Header... headers) {
-        URI uri = buildUri(pathPrefix, endpoint, params);
+        Objects.requireNonNull(params, "params must not be null");
+        Map<String, String> requestParams = new HashMap<>(params);
+        //ignore is a special parameter supported by the clients, shouldn't be sent to es
+        String ignoreString = requestParams.remove("ignore");
+        Set<Integer> ignoreErrorCodes;
+        if (ignoreString == null) {
+            if (HttpHead.METHOD_NAME.equals(method)) {
+                //404 never causes error if returned for a HEAD request
+                ignoreErrorCodes = Collections.singleton(404);
+            } else {
+                ignoreErrorCodes = Collections.emptySet();
+            }
+        } else {
+            String[] ignoresArray = ignoreString.split(",");
+            ignoreErrorCodes = new HashSet<>();
+            if (HttpHead.METHOD_NAME.equals(method)) {
+                //404 never causes error if returned for a HEAD request
+                ignoreErrorCodes.add(404);
+            }
+            for (String ignoreCode : ignoresArray) {
+                try {
+                    ignoreErrorCodes.add(Integer.valueOf(ignoreCode));
+                } catch (NumberFormatException e) {
+                    throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead", e);
+                }
+            }
+        }
+        URI uri = buildUri(pathPrefix, endpoint, requestParams);
         HttpRequestBase request = createHttpRequest(method, uri, entity);
         setHeaders(request, headers);
         FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
         long startTime = System.nanoTime();
-        performRequestAsync(startTime, nextHost().iterator(), request, httpAsyncResponseConsumerFactory, failureTrackingResponseListener);
+        performRequestAsync(startTime, nextHost().iterator(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory,
+                failureTrackingResponseListener);
     }
     private void performRequestAsync(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
+                                     final Set<Integer> ignoreErrorCodes,
                                      final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                      final FailureTrackingResponseListener listener) {
         final HttpHost host = hosts.next();
@@ -304,7 +334,7 @@ public class RestClient implements Closeable {
                 RequestLogger.logResponse(logger, request, host, httpResponse);
                 int statusCode = httpResponse.getStatusLine().getStatusCode();
                 Response response = new Response(request.getRequestLine(), host, httpResponse);
-                if (isSuccessfulResponse(request.getMethod(), statusCode)) {
+                if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) {
                     onResponse(host);
                     listener.onSuccess(response);
                 } else {
@@ -312,7 +342,7 @@ public class RestClient implements Closeable {
                     if (isRetryStatus(statusCode)) {
                         //mark host dead and retry against next one
                         onFailure(host);
-                        retryIfPossible(responseException, hosts, request);
+                        retryIfPossible(responseException);
                     } else {
                         //mark host alive and don't retry, as the error should be a request problem
                         onResponse(host);
@@ -329,13 +359,13 @@ public class RestClient implements Closeable {
                 try {
                     RequestLogger.logFailedRequest(logger, request, host, failure);
                     onFailure(host);
-                    retryIfPossible(failure, hosts, request);
+                    retryIfPossible(failure);
                 } catch(Exception e) {
                     listener.onDefinitiveFailure(e);
                 }
             }
-            private void retryIfPossible(Exception exception, Iterator<HttpHost> hosts, HttpRequestBase request) {
+            private void retryIfPossible(Exception exception) {
                 if (hosts.hasNext()) {
                     //in case we are retrying, check whether maxRetryTimeout has been reached
                     long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
@@ -347,7 +377,7 @@ public class RestClient implements Closeable {
                     } else {
                         listener.trackFailure(exception);
                         request.reset();
-                        performRequestAsync(startTime, hosts, request, httpAsyncResponseConsumerFactory, listener);
+                        performRequestAsync(startTime, hosts, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener);
                     }
                 } else {
                     listener.onDefinitiveFailure(exception);
@@ -452,8 +482,8 @@ public class RestClient implements Closeable {
         client.close();
     }
-    private static boolean isSuccessfulResponse(String method, int statusCode) {
-        return statusCode < 300 || (HttpHead.METHOD_NAME.equals(method) && statusCode == 404);
+    private static boolean isSuccessfulResponse(int statusCode) {
+        return statusCode < 300;
     }
     private static boolean isRetryStatus(int statusCode) {
@@ -510,7 +540,6 @@ public class RestClient implements Closeable {
     }
     private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
-        Objects.requireNonNull(params, "params must not be null");
        Objects.requireNonNull(path, "path must not be null");
        try {
            String fullPath;
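For orientation (an illustrative fragment, not part of the diff; the endpoint is hypothetical): the new "ignore" parameter is consumed by RestClient itself and stripped before the request is sent, letting callers treat chosen error status codes as regular responses:

    // Ask the client to treat 404 and 409 as regular responses rather than
    // throwing ResponseException; the "ignore" entry is never sent to Elasticsearch.
    Map<String, String> params = Collections.singletonMap("ignore", "404,409");
    Response response = restClient.performRequest("GET", "/index/type/1", params);
    int status = response.getStatusLine().getStatusCode(); // may be 404 or 409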

View File

@@ -28,6 +28,8 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
 import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
 import org.apache.http.nio.conn.SchemeIOSessionStrategy;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
 import java.util.Objects;
 /**
@@ -177,7 +179,12 @@ public final class RestClientBuilder {
         if (failureListener == null) {
             failureListener = new RestClient.FailureListener();
         }
-        CloseableHttpAsyncClient httpClient = createHttpClient();
+        CloseableHttpAsyncClient httpClient = AccessController.doPrivileged(new PrivilegedAction<CloseableHttpAsyncClient>() {
+            @Override
+            public CloseableHttpAsyncClient run() {
+                return createHttpClient();
+            }
+        });
         RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
         httpClient.start();
         return restClient;
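The `doPrivileged` wrapper makes the permission check for creating the http client (and its I/O threads) run against the REST client's own protection domain when a `SecurityManager` is installed, rather than the caller's. A sketch of the equivalent lambda form, assuming Java 8 were available to this module:

    CloseableHttpAsyncClient httpClient = AccessController.doPrivileged(
            (PrivilegedAction<CloseableHttpAsyncClient>) this::createHttpClient);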

View File

@@ -216,23 +216,45 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     */
    public void testErrorStatusCodes() throws IOException {
        for (String method : getHttpMethods()) {
+            Set<Integer> expectedIgnores = new HashSet<>();
+            String ignoreParam = "";
+            if (HttpHead.METHOD_NAME.equals(method)) {
+                expectedIgnores.add(404);
+            }
+            if (randomBoolean()) {
+                int numIgnores = randomIntBetween(1, 3);
+                for (int i = 0; i < numIgnores; i++) {
+                    Integer code = randomFrom(getAllErrorStatusCodes());
+                    expectedIgnores.add(code);
+                    ignoreParam += code;
+                    if (i < numIgnores - 1) {
+                        ignoreParam += ",";
+                    }
+                }
+            }
             //error status codes should cause an exception to be thrown
             for (int errorStatusCode : getAllErrorStatusCodes()) {
                 try {
-                    Response response = performRequest(method, "/" + errorStatusCode);
-                    if (method.equals("HEAD") && errorStatusCode == 404) {
-                        //no exception gets thrown although we got a 404
-                        assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
+                    Map<String, String> params;
+                    if (ignoreParam.isEmpty()) {
+                        params = Collections.emptyMap();
+                    } else {
+                        params = Collections.singletonMap("ignore", ignoreParam);
+                    }
+                    Response response = performRequest(method, "/" + errorStatusCode, params);
+                    if (expectedIgnores.contains(errorStatusCode)) {
+                        //no exception gets thrown although we got an error status code, as it was configured to be ignored
+                        assertEquals(errorStatusCode, response.getStatusLine().getStatusCode());
                     } else {
                         fail("request should have failed");
                     }
                 } catch(ResponseException e) {
-                    if (method.equals("HEAD") && errorStatusCode == 404) {
+                    if (expectedIgnores.contains(errorStatusCode)) {
                         throw e;
                     }
-                    assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode));
+                    assertEquals(errorStatusCode, e.getResponse().getStatusLine().getStatusCode());
                 }
-                if (errorStatusCode <= 500) {
+                if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) {
                     failureListener.assertNotCalled();
                 } else {
                     failureListener.assertCalled(httpHost);
@@ -351,11 +373,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     private HttpUriRequest performRandomRequest(String method) throws Exception {
         String uriAsString = "/" + randomStatusCode(getRandom());
         URIBuilder uriBuilder = new URIBuilder(uriAsString);
-        Map<String, String> params = Collections.emptyMap();
+        final Map<String, String> params = new HashMap<>();
         boolean hasParams = randomBoolean();
         if (hasParams) {
             int numParams = randomIntBetween(1, 3);
-            params = new HashMap<>(numParams);
             for (int i = 0; i < numParams; i++) {
                 String paramKey = "param-" + i;
                 String paramValue = randomAsciiOfLengthBetween(3, 10);
@@ -363,6 +384,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
                 uriBuilder.addParameter(paramKey, paramValue);
             }
         }
+        if (randomBoolean()) {
+            //randomly add some ignore parameter, which doesn't get sent as part of the request
+            String ignore = Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes()));
+            if (randomBoolean()) {
+                ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes()));
+            }
+            params.put("ignore", ignore);
+        }
         URI uri = uriBuilder.build();
         HttpUriRequest request;
@@ -433,16 +462,25 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     }
     private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
-        switch(randomIntBetween(0, 2)) {
+        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
+    }
+    private Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
+        int methodSelector;
+        if (params.isEmpty()) {
+            methodSelector = randomIntBetween(0, 2);
+        } else {
+            methodSelector = randomIntBetween(1, 2);
+        }
+        switch(methodSelector) {
             case 0:
                 return restClient.performRequest(method, endpoint, headers);
             case 1:
-                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
+                return restClient.performRequest(method, endpoint, params, headers);
             case 2:
-                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
+                return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers);
             default:
                 throw new UnsupportedOperationException();
         }
     }
 }

View File

@@ -59,7 +59,7 @@ final class RestClientTestUtil {
     }
     static int randomStatusCode(Random random) {
-        return RandomPicks.randomFrom(random, ALL_ERROR_STATUS_CODES);
+        return RandomPicks.randomFrom(random, ALL_STATUS_CODES);
     }
     static int randomOkStatusCode(Random random) {

View File

@@ -21,6 +21,7 @@ package org.elasticsearch;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -82,7 +83,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     private static final String ERROR = "error";
     private static final String ROOT_CAUSE = "root_cause";
-    private static final Map<Integer, FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException>> ID_TO_SUPPLIER;
+    private static final Map<Integer, CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException>> ID_TO_SUPPLIER;
     private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;
     private final Map<String, List<String>> headers = new HashMap<>();
@@ -223,7 +224,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }
     public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
-        FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> elasticsearchException = ID_TO_SUPPLIER.get(id);
+        CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> elasticsearchException = ID_TO_SUPPLIER.get(id);
         if (elasticsearchException == null) {
             throw new IllegalStateException("unknown exception for id: " + id);
         }
@@ -792,12 +793,12 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                 org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0_UNRELEASED);
         final Class<? extends ElasticsearchException> exceptionClass;
-        final FunctionThatThrowsIOException<StreamInput, ? extends ElasticsearchException> constructor;
+        final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
         final int id;
         final Version versionAdded;
         <E extends ElasticsearchException> ElasticsearchExceptionHandle(Class<E> exceptionClass,
-                FunctionThatThrowsIOException<StreamInput, E> constructor, int id,
+                CheckedFunction<StreamInput, E, IOException> constructor, int id,
                 Version versionAdded) {
             // We need the exceptionClass because you can't dig it out of the constructor reliably.
             this.exceptionClass = exceptionClass;
@@ -892,10 +893,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         builder.endObject();
     }
-    interface FunctionThatThrowsIOException<T, R> {
-        R apply(T t) throws IOException;
-    }
     // lower cases and adds underscores to transitions in a name
     private static String toUnderscoreCase(String value) {
         StringBuilder sb = new StringBuilder();

View File

@@ -19,7 +19,6 @@
 package org.elasticsearch.action;
-import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -196,6 +195,7 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction
 import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
 import org.elasticsearch.action.update.TransportUpdateAction;
 import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.NamedRegistry;
 import org.elasticsearch.common.inject.AbstractModule;
@@ -205,6 +205,7 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
 import org.elasticsearch.rest.RestController;
@@ -332,7 +333,8 @@ public class ActionModule extends AbstractModule {
     private final RestController restController;
     public ActionModule(boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
-                        ClusterSettings clusterSettings, ThreadPool threadPool, List<ActionPlugin> actionPlugins) {
+                        ClusterSettings clusterSettings, ThreadPool threadPool, List<ActionPlugin> actionPlugins,
+                        NodeClient nodeClient, CircuitBreakerService circuitBreakerService) {
         this.transportClient = transportClient;
         this.settings = settings;
         this.actionPlugins = actionPlugins;
@@ -352,9 +354,14 @@ public class ActionModule extends AbstractModule {
                 restWrapper = newRestWrapper;
             }
         }
-        restController = new RestController(settings, headers, restWrapper);
+        if (transportClient) {
+            restController = null;
+        } else {
+            restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService);
+        }
     }
     public Map<String, ActionHandler<?, ?>> getActions() {
         return actions;
     }
@@ -648,8 +655,10 @@ public class ActionModule extends AbstractModule {
             }
         }
-        // Bind the RestController which is required (by Node) even if rest isn't enabled.
-        bind(RestController.class).toInstance(restController);
+        if (restController != null) {
+            // Bind the RestController which is required (by Node) even if rest isn't enabled.
+            bind(RestController.class).toInstance(restController);
+        }
         // Setup the RestHandlers
         if (NetworkModule.HTTP_ENABLED.get(settings)) {

View File

@@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -29,7 +29,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -39,7 +39,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -29,6 +30,7 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.ReplicationOperation;
+import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
 import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.action.update.UpdateHelper;
@@ -48,14 +50,12 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.engine.EngineClosedException;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
 import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardClosedException;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
@@ -65,9 +65,6 @@ import org.elasticsearch.transport.TransportService;
 import java.util.Map;
-import static org.elasticsearch.action.support.replication.ReplicationOperation.ignoreReplicaException;
-import static org.elasticsearch.action.support.replication.ReplicationOperation.isConflictException;
 /** Performs shard-level bulk (index, delete or update) operations */
 public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardRequest, BulkShardResponse> {
@@ -235,6 +232,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         return location;
     }
+    private static boolean isConflictException(final Exception e) {
+        return ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException;
+    }
     private static class UpdateResultHolder {
         final BulkItemRequest replicaRequest;
         final Engine.Result operationResult;
@@ -388,11 +389,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                 Exception failure = operationResult.getFailure();
                 assert failure instanceof VersionConflictEngineException
                         || failure instanceof MapperParsingException
-                        || failure instanceof EngineClosedException
-                        || failure instanceof IndexShardClosedException
                         : "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" +
                         " failures. got " + failure;
-                if (!ignoreReplicaException(failure)) {
+                if (!TransportActions.isShardNotAvailableException(failure)) {
                     throw failure;
                 }
             } else {
@@ -401,7 +400,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         } catch (Exception e) {
             // if its not an ignore replica failure, we need to make sure to bubble up the failure
             // so we will fail the shard
-            if (!ignoreReplicaException(e)) {
+            if (!TransportActions.isShardNotAvailableException(e)) {
                 throw e;
             }
         }

View File

@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.ingest.PipelineStore;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.ingest.PipelineStore;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -36,7 +36,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.ingest.PipelineStore;
 import org.elasticsearch.ingest.IngestInfo;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -19,7 +19,6 @@
 package org.elasticsearch.action.ingest;
-import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -28,7 +27,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.ingest.PipelineStore;
-import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.NodeService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

View File

@@ -202,7 +202,7 @@ public class ReplicationOperation<
                             shard,
                             replicaRequest),
                     replicaException);
-            if (ignoreReplicaException(replicaException)) {
+            if (TransportActions.isShardNotAvailableException(replicaException)) {
                 decPendingAndFinishIfNeeded();
             } else {
                 RestStatus restStatus = ExceptionsHelper.status(replicaException);
@@ -314,30 +314,6 @@
         }
     }
-    /**
-     * Should an exception be ignored when the operation is performed on the replica.
-     */
-    public static boolean ignoreReplicaException(Exception e) {
-        if (TransportActions.isShardNotAvailableException(e)) {
-            return true;
-        }
-        // on version conflict or document missing, it means
-        // that a new change has crept into the replica, and it's fine
-        if (isConflictException(e)) {
-            return true;
-        }
-        return false;
-    }
-    public static boolean isConflictException(Throwable t) {
-        final Throwable cause = ExceptionsHelper.unwrapCause(t);
-        // on version conflict or document missing, it means
-        // that a new change has crept into the replica, and it's fine
-        return cause instanceof VersionConflictEngineException;
-    }
     public interface Primary<
             Request extends ReplicationRequest<Request>,
             ReplicaRequest extends ReplicationRequest<ReplicaRequest>,

View File

@@ -40,7 +40,6 @@ import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.network.IfConfig;
 import org.elasticsearch.common.settings.KeyStoreWrapper;
-import org.elasticsearch.common.settings.SecureSetting;
 import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
@@ -50,7 +49,7 @@ import org.elasticsearch.monitor.os.OsProbe;
 import org.elasticsearch.monitor.process.ProcessProbe;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.NodeValidationException;
-import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.node.InternalSettingsPreparer;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;

View File

@@ -24,7 +24,7 @@ import joptsimple.OptionSpec;
 import joptsimple.util.KeyValuePair;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.node.InternalSettingsPreparer;
 import java.util.HashMap;
 import java.util.Locale;

View File

@@ -46,7 +46,7 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.node.Node;
-import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.node.InternalSettingsPreparer;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.NetworkPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -160,7 +160,7 @@ public abstract class TransportClient extends AbstractClient {
         }
         modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
         ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getClusterSettings(),
-                threadPool, pluginsService.filterPlugins(ActionPlugin.class));
+                threadPool, pluginsService.filterPlugins(ActionPlugin.class), null, null);
         modules.add(actionModule);
         CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
@@ -170,7 +170,7 @@ public abstract class TransportClient extends AbstractClient {
         resourcesToClose.add(bigArrays);
         modules.add(settingsModule);
         NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
-                bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService);
+                bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
         final Transport transport = networkModule.getTransportSupplier().get();
         final TransportService transportService = new TransportService(settings, transport, threadPool,
                 networkModule.getTransportInterceptor(),

View File

@@ -17,14 +17,14 @@
  * under the License.
  */
-package org.elasticsearch.http;
+package org.elasticsearch.common;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.rest.RestChannel;
-import org.elasticsearch.rest.RestRequest;
+import java.util.function.Function;
-public interface HttpServerAdapter {
-    void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext context);
+/**
+ * A {@link Function}-like interface which allows throwing checked exceptions.
+ */
+@FunctionalInterface
+public interface CheckedFunction<T, R, E extends Exception> {
+    R apply(T t) throws E;
 }
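As an aside (illustrative only, not part of the commit), the extra exception type parameter is what lets `CheckedFunction` subsume one-off interfaces like the `FunctionThatThrowsIOException` removed earlier in this commit; a minimal self-contained sketch:

    // A CheckedFunction whose apply() may throw IOException, assigned from a lambda.
    CheckedFunction<String, Integer, IOException> parse = s -> {
        if (s.isEmpty()) {
            throw new IOException("empty input"); // checked exception propagates via E
        }
        return Integer.parseInt(s);
    };
    int value = parse.apply("42"); // caller must catch or declare IOException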

View File

@@ -36,6 +36,7 @@ import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.Node;
 import java.io.IOException;
 import java.nio.file.FileVisitOption;
@@ -97,7 +98,7 @@ public class LogConfigurator {
         final Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
         Files.walkFileTree(configsPath, options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
             @Override
-            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+            public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                 if (file.getFileName().toString().equals("log4j2.properties")) {
                     configurations.add((PropertiesConfiguration) factory.getConfiguration(context, file.toString(), file.toUri()));
                 }
@@ -122,23 +123,53 @@ public class LogConfigurator {
         Configurator.initialize(builder.build());
     }
-    private static void configureLoggerLevels(Settings settings) {
+    /**
+     * Configures the logging levels for loggers configured in the specified settings.
+     *
+     * @param settings the settings from which logger levels will be extracted
+     */
+    private static void configureLoggerLevels(final Settings settings) {
         if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
             final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
             Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
         }
         final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
-        for (String key : levels.keySet()) {
-            final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
-            Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
+        for (final String key : levels.keySet()) {
+            // do not set a log level for a logger named level (from the default log setting)
+            if (!key.equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey())) {
+                final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
+                Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
+            }
         }
     }
+    /**
+     * Set system properties that can be used in configuration files to specify paths and file patterns for log files. We expose three
+     * properties here:
+     * <ul>
+     * <li>
+     * {@code es.logs.base_path} the base path containing the log files
+     * </li>
+     * <li>
+     * {@code es.logs.cluster_name} the cluster name, used as the prefix of log filenames in the default configuration
+     * </li>
+     * <li>
+     * {@code es.logs.node_name} the node name, can be used as part of log filenames (only exposed if {@link Node#NODE_NAME_SETTING} is
+     * explicitly set)
+     * </li>
+     * </ul>
+     *
+     * @param logsPath the path to the log files
+     * @param settings the settings to extract the cluster and node names
+     */
     @SuppressForbidden(reason = "sets system property for logging configuration")
     private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) {
-        System.setProperty("es.logs", logsPath.resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString());
+        System.setProperty("es.logs.base_path", logsPath.toString());
+        System.setProperty("es.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value());
+        if (Node.NODE_NAME_SETTING.exists(settings)) {
+            System.setProperty("es.logs.node_name", Node.NODE_NAME_SETTING.get(settings));
+        }
     }
 }
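As an aside, the javadoc above describes properties intended for log4j2 configuration files; a hypothetical `log4j2.properties` fragment composing a log file path from them (the appender name is illustrative):

    appender.rolling.type = RollingFile
    appender.rolling.name = rolling
    appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log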

View File

@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry.FromXContent;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.plugins.NetworkPlugin;
+import org.elasticsearch.rest.RestController;
 import org.elasticsearch.tasks.RawTaskStatus;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -109,13 +110,13 @@ public final class NetworkModule {
                          CircuitBreakerService circuitBreakerService,
                          NamedWriteableRegistry namedWriteableRegistry,
                          NamedXContentRegistry xContentRegistry,
-                         NetworkService networkService) {
+                         NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) {
         this.settings = settings;
         this.transportClient = transportClient;
         for (NetworkPlugin plugin : plugins) {
             if (transportClient == false && HTTP_ENABLED.get(settings)) {
                 Map<String, Supplier<HttpServerTransport>> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays,
-                    circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService);
+                    circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher);
                 for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) {
                     registerHttpTransport(entry.getKey(), entry.getValue());
                 }

View File

@@ -60,12 +60,6 @@ public class NetworkService extends AbstractComponent {
             Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
         public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
             Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
-        public static final Setting<Boolean> TCP_BLOCKING =
-            Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope);
-        public static final Setting<Boolean> TCP_BLOCKING_SERVER =
-            Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope);
-        public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
-            Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope);
         public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
             Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope);
     }

View File

@@ -278,7 +278,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
             TcpTransport.CONNECTIONS_PER_NODE_STATE,
             TcpTransport.CONNECTIONS_PER_NODE_PING,
             TcpTransport.PING_SCHEDULE,
-            TcpTransport.TCP_BLOCKING_CLIENT,
             TcpTransport.TCP_CONNECT_TIMEOUT,
             NetworkService.NETWORK_SERVER,
             TcpTransport.TCP_NO_DELAY,
@@ -286,7 +285,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
             TcpTransport.TCP_REUSE_ADDRESS,
             TcpTransport.TCP_SEND_BUFFER_SIZE,
             TcpTransport.TCP_RECEIVE_BUFFER_SIZE,
-            TcpTransport.TCP_BLOCKING_SERVER,
             NetworkService.GLOBAL_NETWORK_HOST_SETTING,
             NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
             NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
@@ -295,9 +293,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
             NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
             NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
             NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
-            NetworkService.TcpSettings.TCP_BLOCKING,
-            NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
-            NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
             NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
             IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
             IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,

View File

@@ -54,6 +54,7 @@ public class SettingsModule implements Module {
     private final Logger logger;
     private final IndexScopedSettings indexScopedSettings;
     private final ClusterSettings clusterSettings;
+    private final SettingsFilter settingsFilter;
     public SettingsModule(Settings settings, Setting<?>... additionalSettings) {
         this(settings, Arrays.asList(additionalSettings), Collections.emptyList());
@@ -137,12 +138,13 @@
         final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.negate();
         clusterSettings.validate(settings.filter(acceptOnlyClusterSettings));
         validateTribeSettings(settings, clusterSettings);
+        this.settingsFilter = new SettingsFilter(settings, settingsFilterPattern);
     }
     @Override
     public void configure(Binder binder) {
         binder.bind(Settings.class).toInstance(settings);
-        binder.bind(SettingsFilter.class).toInstance(new SettingsFilter(settings, settingsFilterPattern));
+        binder.bind(SettingsFilter.class).toInstance(settingsFilter);
         binder.bind(ClusterSettings.class).toInstance(clusterSettings);
         binder.bind(IndexScopedSettings.class).toInstance(indexScopedSettings);
     }
@@ -218,4 +220,6 @@ public class SettingsModule implements Module {
     public ClusterSettings getClusterSettings() {
         return clusterSettings;
     }
+    public SettingsFilter getSettingsFilter() { return settingsFilter; }
 }

View File

@@ -1,206 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.rest.RestStatus.FORBIDDEN;
import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;
/**
* A component to serve http requests, backed by rest handlers.
*/
public class HttpServer extends AbstractLifecycleComponent implements HttpServerAdapter {
private final HttpServerTransport transport;
private final RestController restController;
private final NodeClient client;
private final CircuitBreakerService circuitBreakerService;
public HttpServer(Settings settings, HttpServerTransport transport, RestController restController,
NodeClient client, CircuitBreakerService circuitBreakerService) {
super(settings);
this.transport = transport;
this.restController = restController;
this.client = client;
this.circuitBreakerService = circuitBreakerService;
transport.httpServerAdapter(this);
}
@Override
protected void doStart() {
transport.start();
if (logger.isInfoEnabled()) {
logger.info("{}", transport.boundAddress());
}
}
@Override
protected void doStop() {
transport.stop();
}
@Override
protected void doClose() {
transport.close();
}
public HttpInfo info() {
return transport.info();
}
public HttpStats stats() {
return transport.stats();
}
public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
if (request.rawPath().equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
RestChannel responseChannel = channel;
try {
int contentLength = request.content().length();
if (restController.canTripCircuitBreaker(request)) {
inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, "<http_request>");
} else {
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(contentLength);
}
// iff we could reserve bytes for the request we need to send the response also over this channel
responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength);
restController.dispatchRequest(request, responseChannel, client, threadContext);
} catch (Exception e) {
try {
responseChannel.sendResponse(new BytesRestResponse(channel, e));
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner);
}
}
}
void handleFavicon(RestRequest request, RestChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
Streams.copy(stream, out);
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", out.toByteArray());
channel.sendResponse(restResponse);
}
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
} else {
channel.sendResponse(new BytesRestResponse(FORBIDDEN, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
}
private static final class ResourceHandlingHttpChannel implements RestChannel {
private final RestChannel delegate;
private final CircuitBreakerService circuitBreakerService;
private final int contentLength;
private final AtomicBoolean closed = new AtomicBoolean();
public ResourceHandlingHttpChannel(RestChannel delegate, CircuitBreakerService circuitBreakerService, int contentLength) {
this.delegate = delegate;
this.circuitBreakerService = circuitBreakerService;
this.contentLength = contentLength;
}
@Override
public XContentBuilder newBuilder() throws IOException {
return delegate.newBuilder();
}
@Override
public XContentBuilder newErrorBuilder() throws IOException {
return delegate.newErrorBuilder();
}
@Override
public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException {
return delegate.newBuilder(autoDetectSource, useFiltering);
}
@Override
public BytesStreamOutput bytesOutput() {
return delegate.bytesOutput();
}
@Override
public RestRequest request() {
return delegate.request();
}
@Override
public boolean detailedErrorsEnabled() {
return delegate.detailedErrorsEnabled();
}
@Override
public void sendResponse(RestResponse response) {
close();
delegate.sendResponse(response);
}
private void close() {
// attempt to close once atomically
if (closed.compareAndSet(false, true) == false) {
throw new IllegalStateException("Channel is already closed");
}
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(-contentLength);
}
}
private static CircuitBreaker inFlightRequestsBreaker(CircuitBreakerService circuitBreakerService) {
// We always obtain a fresh breaker to reflect changes to the breaker configuration.
return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
}
}
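The deleted server reserved the request's content length on the in-flight-requests breaker before dispatch and released it exactly once when the response went out (its dispatch duties move to RestController elsewhere in this commit). A simplified sketch of that reserve/release-once idiom, with a hypothetical `Breaker` standing in for the real CircuitBreaker API:

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in for the circuit breaker; only the accounting matters.
class Breaker {
    private final AtomicLong used = new AtomicLong();
    void add(long bytes) { used.addAndGet(bytes); }
    long used() { return used.get(); }
}

// Mirrors ResourceHandlingHttpChannel: bytes reserved up front are given back
// exactly once, no matter how sendResponse is (mis)used.
class ReleasingChannel {
    private final Breaker breaker;
    private final int contentLength;
    private final AtomicBoolean closed = new AtomicBoolean();

    ReleasingChannel(Breaker breaker, int contentLength) {
        this.breaker = breaker;
        this.contentLength = contentLength;
        breaker.add(contentLength); // reserve while the request is in flight
    }

    void sendResponse(String response) {
        if (closed.compareAndSet(false, true) == false) {
            throw new IllegalStateException("Channel is already closed");
        }
        breaker.add(-contentLength); // release once
        System.out.println(response + "; in-flight bytes: " + breaker.used());
    }

    public static void main(String[] args) {
        Breaker breaker = new Breaker();
        ReleasingChannel channel = new ReleasingChannel(breaker, 1024);
        channel.sendResponse("200 OK"); // prints: 200 OK; in-flight bytes: 0
    }
}
---------------------------------------------------------------------------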

View File

@ -21,11 +21,13 @@ package org.elasticsearch.http;
import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
public interface HttpServerTransport extends LifecycleComponent { public interface HttpServerTransport extends LifecycleComponent {
String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";
String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss";
BoundTransportAddress boundAddress(); BoundTransportAddress boundAddress();
@ -33,6 +35,15 @@ public interface HttpServerTransport extends LifecycleComponent {
HttpStats stats(); HttpStats stats();
void httpServerAdapter(HttpServerAdapter httpServerAdapter); @FunctionalInterface
interface Dispatcher {
/**
* Dispatches the {@link RestRequest} to the relevant request handler or responds to the given rest channel directly if
* the request can't be handled by any request handler.
* @param request the request to dispatch
* @param channel the response channel of this request
* @param threadContext the node's thread context
*/
void dispatch(RestRequest request, RestChannel channel, ThreadContext threadContext);
}
} }
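This @FunctionalInterface lets an HTTP transport hand requests to the REST layer without a compile-time dependency on it; later in this commit, Node wires it up with the method reference `restController::dispatchRequest`. A self-contained sketch of the same shape — the `Mini*` types are stand-ins, not the real RestRequest/RestChannel/ThreadContext:

---------------------------------------------------------------------------
// Stand-in types; the real signature uses RestRequest, RestChannel and
// ThreadContext.
@FunctionalInterface
interface MiniDispatcher {
    void dispatch(String request, StringBuilder channel, Object threadContext);
}

class MiniRestController {
    void dispatchRequest(String request, StringBuilder channel, Object threadContext) {
        channel.append("handled ").append(request); // route to the matching handler
    }
}

public class DispatcherSketch {
    public static void main(String[] args) {
        MiniRestController controller = new MiniRestController();
        // the transport sees only the functional interface, never the controller:
        MiniDispatcher dispatcher = controller::dispatchRequest;
        StringBuilder channel = new StringBuilder();
        dispatcher.dispatch("GET /_cluster/health", channel, null);
        System.out.println(channel); // handled GET /_cluster/health
    }
}
---------------------------------------------------------------------------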

View File

@ -44,7 +44,6 @@ import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.IndexFieldDataService;
@ -675,7 +674,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
if (translog.syncNeeded()) { if (translog.syncNeeded()) {
translog.sync(); translog.sync();
} }
} catch (EngineClosedException | AlreadyClosedException ex) { } catch (AlreadyClosedException ex) {
// fine - continue; // fine - continue;
} catch (IOException e) { } catch (IOException e) {
logger.warn("failed to sync translog", e); logger.warn("failed to sync translog", e);
@ -723,7 +722,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
case STARTED: case STARTED:
try { try {
shard.updateGlobalCheckpointOnPrimary(); shard.updateGlobalCheckpointOnPrimary();
} catch (EngineClosedException | AlreadyClosedException ex) { } catch (AlreadyClosedException ex) {
// fine - continue, the shard was concurrently closed on us. // fine - continue, the shard was concurrently closed on us.
} }
continue; continue;

View File

@ -0,0 +1,38 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.synonym.FlattenGraphFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public class FlattenGraphTokenFilterFactory extends AbstractTokenFilterFactory {
public FlattenGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new FlattenGraphFilter(tokenStream);
}
}
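FlattenGraphFilter squashes the token graph emitted by graph-aware filters such as SynonymGraphFilter back into a linear stream that indexing can consume. A sketch of how this factory's output is typically chained, against the Lucene 6.x API as I understand it; the synonym rule is invented for illustration:

---------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.FlattenGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;

public class FlattenGraphSketch {
    // "wifi" expands to the two-token synonym "wireless network"; the graph
    // filter emits a token graph, and FlattenGraphFilter linearizes it.
    static Analyzer flattenedSynonymAnalyzer() throws IOException {
        SynonymMap.Builder builder = new SynonymMap.Builder(true);
        CharsRefBuilder multiWord = new CharsRefBuilder();
        SynonymMap.Builder.join(new String[] {"wireless", "network"}, multiWord);
        builder.add(new CharsRef("wifi"), multiWord.get(), true);
        SynonymMap synonyms = builder.build();
        return new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                Tokenizer tokenizer = new WhitespaceTokenizer();
                TokenStream graph = new SynonymGraphFilter(tokenizer, synonyms, true);
                return new TokenStreamComponents(tokenizer, new FlattenGraphFilter(graph));
            }
        };
    }
}
---------------------------------------------------------------------------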

View File

@ -512,7 +512,7 @@ public abstract class Engine implements Closeable {
manager.release(searcher); manager.release(searcher);
} }
} }
} catch (EngineClosedException ex) { } catch (AlreadyClosedException ex) {
throw ex; throw ex;
} catch (Exception ex) { } catch (Exception ex) {
ensureOpen(); // throw EngineClosedException here if we are already closed ensureOpen(); // throw EngineClosedException here if we are already closed
@ -530,7 +530,7 @@ public abstract class Engine implements Closeable {
protected void ensureOpen() { protected void ensureOpen() {
if (isClosed.get()) { if (isClosed.get()) {
throw new EngineClosedException(shardId, failedEngine.get()); throw new AlreadyClosedException(shardId + " engine is closed", failedEngine.get());
} }
} }
@ -1017,6 +1017,7 @@ public abstract class Engine implements Closeable {
public Index(Term uid, ParsedDocument doc, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin, public Index(Term uid, ParsedDocument doc, long seqNo, long primaryTerm, long version, VersionType versionType, Origin origin,
long startTime, long autoGeneratedIdTimestamp, boolean isRetry) { long startTime, long autoGeneratedIdTimestamp, boolean isRetry) {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime); super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
assert uid.bytes().equals(doc.uid()) : "term uid " + uid + " doesn't match doc uid " + doc.uid();
this.doc = doc; this.doc = doc;
this.isRetry = isRetry; this.isRetry = isRetry;
this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp; this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
@ -1282,7 +1283,7 @@ public abstract class Engine implements Closeable {
logger.debug("flushing shard on close - this might take some time to sync files to disk"); logger.debug("flushing shard on close - this might take some time to sync files to disk");
try { try {
flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running. flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running.
} catch (EngineClosedException ex) { } catch (AlreadyClosedException ex) {
logger.debug("engine already closed - skipping flushAndClose"); logger.debug("engine already closed - skipping flushAndClose");
} }
} finally { } finally {
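With EngineClosedException on its way out, ensureOpen now surfaces Lucene's AlreadyClosedException directly. A minimal sketch of the liveness-check idiom after this change; `MiniEngine` is illustrative, not the real Engine:

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.lucene.store.AlreadyClosedException;

// MiniEngine is illustrative only; it mirrors the ensureOpen idiom above.
class MiniEngine implements AutoCloseable {
    private final AtomicBoolean closed = new AtomicBoolean();

    void ensureOpen() {
        if (closed.get()) {
            throw new AlreadyClosedException("engine is closed");
        }
    }

    void refresh() {
        ensureOpen(); // every operation starts with a liveness check
        // ... refresh the searcher ...
    }

    @Override
    public void close() {
        closed.set(true);
    }
}
---------------------------------------------------------------------------

Callers that race with a concurrent close catch AlreadyClosedException and treat it as benign, exactly as the hunks in this commit do.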

View File

@ -33,6 +33,7 @@ import java.io.IOException;
* *
* *
*/ */
@Deprecated
public class EngineClosedException extends IndexShardClosedException { public class EngineClosedException extends IndexShardClosedException {
public EngineClosedException(ShardId shardId) { public EngineClosedException(ShardId shardId) {

View File

@ -477,10 +477,7 @@ public class InternalEngine extends Engine {
} }
if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) { if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
if (op.origin().isRecovery()) { if (op.origin() == Operation.Origin.PRIMARY) {
// version conflict, but okay
result = onSuccess.get();
} else {
// fatal version conflict // fatal version conflict
final VersionConflictEngineException e = final VersionConflictEngineException e =
new VersionConflictEngineException( new VersionConflictEngineException(
@ -489,8 +486,13 @@ public class InternalEngine extends Engine {
op.id(), op.id(),
op.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted)); op.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
result = onFailure.apply(e); result = onFailure.apply(e);
} else {
/*
* Version conflicts during recovery and on replicas are normal due to asynchronous execution; as such, we should return a
* successful result.
*/
result = onSuccess.get();
} }
return Optional.of(result); return Optional.of(result);
} else { } else {
return Optional.empty(); return Optional.empty();
@ -544,6 +546,10 @@ public class InternalEngine extends Engine {
// and set the error in operation.setFailure. In case of environment related errors, the failure // and set the error in operation.setFailure. In case of environment related errors, the failure
// is bubbled up // is bubbled up
isDocumentFailure = maybeFailEngine(operation.operationType().getLowercase(), failure) == false; isDocumentFailure = maybeFailEngine(operation.operationType().getLowercase(), failure) == false;
if (failure instanceof AlreadyClosedException) {
// ensureOpen throws AlreadyClosedException which is not a document level issue
isDocumentFailure = false;
}
} catch (Exception inner) { } catch (Exception inner) {
// we failed checking whether the failure can fail the engine, treat it as a persistent engine failure // we failed checking whether the failure can fail the engine, treat it as a persistent engine failure
isDocumentFailure = false; isDocumentFailure = false;
@ -672,7 +678,7 @@ public class InternalEngine extends Engine {
} }
} }
final long expectedVersion = index.version(); final long expectedVersion = index.version();
final Optional<IndexResult> checkVersionConflictResult = final Optional<IndexResult> resultOnVersionConflict =
checkVersionConflict( checkVersionConflict(
index, index,
currentVersion, currentVersion,
@ -682,15 +688,15 @@ public class InternalEngine extends Engine {
e -> new IndexResult(e, currentVersion, index.seqNo())); e -> new IndexResult(e, currentVersion, index.seqNo()));
final IndexResult indexResult; final IndexResult indexResult;
if (checkVersionConflictResult.isPresent()) { if (resultOnVersionConflict.isPresent()) {
indexResult = checkVersionConflictResult.get(); indexResult = resultOnVersionConflict.get();
} else { } else {
// no version conflict // no version conflict
if (index.origin() == Operation.Origin.PRIMARY) { if (index.origin() == Operation.Origin.PRIMARY) {
seqNo = seqNoService().generateSeqNo(); seqNo = seqNoService().generateSeqNo();
} }
/** /*
* Update the document's sequence number and primary term; the sequence number here is derived from either the sequence * Update the document's sequence number and primary term; the sequence number here is derived from either the sequence
* number service if this is on the primary, or the existing document's sequence number if this is on the replica. The * number service if this is on the primary, or the existing document's sequence number if this is on the replica. The
* primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created. * primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created.
@ -707,10 +713,12 @@ public class InternalEngine extends Engine {
update(index.uid(), index.docs(), indexWriter); update(index.uid(), index.docs(), indexWriter);
} }
indexResult = new IndexResult(updatedVersion, seqNo, deleted); indexResult = new IndexResult(updatedVersion, seqNo, deleted);
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion));
}
if (!indexResult.hasFailure()) {
location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
? translog.add(new Translog.Index(index, indexResult)) ? translog.add(new Translog.Index(index, indexResult))
: null; : null;
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion));
indexResult.setTranslogLocation(location); indexResult.setTranslogLocation(location);
} }
indexResult.setTook(System.nanoTime() - index.startTime()); indexResult.setTook(System.nanoTime() - index.startTime());
@ -804,7 +812,7 @@ public class InternalEngine extends Engine {
final long expectedVersion = delete.version(); final long expectedVersion = delete.version();
final Optional<DeleteResult> result = final Optional<DeleteResult> resultOnVersionConflict =
checkVersionConflict( checkVersionConflict(
delete, delete,
currentVersion, currentVersion,
@ -812,10 +820,9 @@ public class InternalEngine extends Engine {
deleted, deleted,
() -> new DeleteResult(expectedVersion, delete.seqNo(), true), () -> new DeleteResult(expectedVersion, delete.seqNo(), true),
e -> new DeleteResult(e, expectedVersion, delete.seqNo())); e -> new DeleteResult(e, expectedVersion, delete.seqNo()));
final DeleteResult deleteResult; final DeleteResult deleteResult;
if (result.isPresent()) { if (resultOnVersionConflict.isPresent()) {
deleteResult = result.get(); deleteResult = resultOnVersionConflict.get();
} else { } else {
if (delete.origin() == Operation.Origin.PRIMARY) { if (delete.origin() == Operation.Origin.PRIMARY) {
seqNo = seqNoService().generateSeqNo(); seqNo = seqNoService().generateSeqNo();
@ -824,11 +831,14 @@ public class InternalEngine extends Engine {
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion); updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue); found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue);
deleteResult = new DeleteResult(updatedVersion, seqNo, found); deleteResult = new DeleteResult(updatedVersion, seqNo, found);
versionMap.putUnderLock(delete.uid().bytes(),
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
}
if (!deleteResult.hasFailure()) {
location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
? translog.add(new Translog.Delete(delete, deleteResult)) ? translog.add(new Translog.Delete(delete, deleteResult))
: null; : null;
versionMap.putUnderLock(delete.uid().bytes(),
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
deleteResult.setTranslogLocation(location); deleteResult.setTranslogLocation(location);
} }
deleteResult.setTook(System.nanoTime() - delete.startTime()); deleteResult.setTook(System.nanoTime() - delete.startTime());
@ -895,8 +905,6 @@ public class InternalEngine extends Engine {
} catch (AlreadyClosedException e) { } catch (AlreadyClosedException e) {
failOnTragicEvent(e); failOnTragicEvent(e);
throw e; throw e;
} catch (EngineClosedException e) {
throw e;
} catch (Exception e) { } catch (Exception e) {
try { try {
failEngine("refresh failed", e); failEngine("refresh failed", e);
@ -943,8 +951,6 @@ public class InternalEngine extends Engine {
} catch (AlreadyClosedException e) { } catch (AlreadyClosedException e) {
failOnTragicEvent(e); failOnTragicEvent(e);
throw e; throw e;
} catch (EngineClosedException e) {
throw e;
} catch (Exception e) { } catch (Exception e) {
try { try {
failEngine("writeIndexingBuffer failed", e); failEngine("writeIndexingBuffer failed", e);
@ -1123,7 +1129,7 @@ public class InternalEngine extends Engine {
@Override @Override
public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes,
final boolean upgrade, final boolean upgradeOnlyAncientSegments) throws EngineException, EngineClosedException, IOException { final boolean upgrade, final boolean upgradeOnlyAncientSegments) throws EngineException, IOException {
/* /*
* We do NOT acquire the readlock here since we are waiting on the merges to finish * We do NOT acquire the readlock here since we are waiting on the merges to finish
* that's fine since the IW.rollback should stop all the threads and trigger an IOException * that's fine since the IW.rollback should stop all the threads and trigger an IOException
@ -1209,7 +1215,8 @@ public class InternalEngine extends Engine {
} }
@SuppressWarnings("finally") @SuppressWarnings("finally")
private void failOnTragicEvent(AlreadyClosedException ex) { private boolean failOnTragicEvent(AlreadyClosedException ex) {
final boolean engineFailed;
// if we are already closed due to some tragic exception // if we are already closed due to some tragic exception
// we need to fail the engine. it might have already been failed before // we need to fail the engine. it might have already been failed before
// but we are double-checking it's failed and closed // but we are double-checking it's failed and closed
@ -1222,14 +1229,19 @@ public class InternalEngine extends Engine {
} }
} else { } else {
failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException()); failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException());
engineFailed = true;
} }
} else if (translog.isOpen() == false && translog.getTragicException() != null) { } else if (translog.isOpen() == false && translog.getTragicException() != null) {
failEngine("already closed by tragic event on the translog", translog.getTragicException()); failEngine("already closed by tragic event on the translog", translog.getTragicException());
} else if (failedEngine.get() == null) { // we are closed but the engine is not failed yet? engineFailed = true;
} else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet?
// this smells like a bug - we only expect ACE if we are in a fatal case i.e. either translog or IW is closed by // this smells like a bug - we only expect ACE if we are in a fatal case i.e. either translog or IW is closed by
// a tragic event or has closed itself. if that is not the case we are in a buggy state and raise an assertion error // a tragic event or has closed itself. if that is not the case we are in a buggy state and raise an assertion error
throw new AssertionError("Unexpected AlreadyClosedException", ex); throw new AssertionError("Unexpected AlreadyClosedException", ex);
} else {
engineFailed = false;
} }
return engineFailed;
} }
@Override @Override
@ -1242,8 +1254,7 @@ public class InternalEngine extends Engine {
// exception that should only be thrown in a tragic event. we pass on the checks to failOnTragicEvent which will // exception that should only be thrown in a tragic event. we pass on the checks to failOnTragicEvent which will
// throw and AssertionError if the tragic event condition is not met. // throw and AssertionError if the tragic event condition is not met.
if (e instanceof AlreadyClosedException) { if (e instanceof AlreadyClosedException) {
failOnTragicEvent((AlreadyClosedException)e); return failOnTragicEvent((AlreadyClosedException)e);
return true;
} else if (e != null && } else if (e != null &&
((indexWriter.isOpen() == false && indexWriter.getTragicException() == e) ((indexWriter.isOpen() == false && indexWriter.getTragicException() == e)
|| (translog.isOpen() == false && translog.getTragicException() == e))) { || (translog.isOpen() == false && translog.getTragicException() == e))) {
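The rewritten conflict branch above fails a write only when it originates on the primary; replicas and recovery replay operations that were already accepted, so a version conflict there is reported as success. A simplified sketch of that origin-dependent decision, with stand-in types for the engine's operations and results:

---------------------------------------------------------------------------
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;

public class ConflictSketch {
    // Stand-in for the engine's Operation.Origin.
    enum Origin { PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY }

    static <R> Optional<R> checkVersionConflict(boolean conflicts, Origin origin,
                                                Supplier<R> onSuccess,
                                                Function<Exception, R> onFailure) {
        if (conflicts == false) {
            return Optional.empty(); // no conflict: the caller performs the write
        }
        if (origin == Origin.PRIMARY) {
            // fatal on the primary: the client must see the version conflict
            return Optional.of(onFailure.apply(new IllegalStateException("version conflict")));
        }
        // replicas and recovery replay operations that were already accepted,
        // so the conflict is expected and reported as success
        return Optional.of(onSuccess.get());
    }

    public static void main(String[] args) {
        System.out.println(checkVersionConflict(true, Origin.PRIMARY, () -> "ok", e -> "failed")); // Optional[failed]
        System.out.println(checkVersionConflict(true, Origin.REPLICA, () -> "ok", e -> "failed")); // Optional[ok]
    }
}
---------------------------------------------------------------------------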

View File

@ -197,9 +197,6 @@ public class ShadowEngine extends Engine {
ensureOpen(); ensureOpen();
searcherManager.maybeRefreshBlocking(); searcherManager.maybeRefreshBlocking();
} catch (AlreadyClosedException e) { } catch (AlreadyClosedException e) {
// This means there's a bug somewhere: don't suppress it
throw new AssertionError(e);
} catch (EngineClosedException e) {
throw e; throw e;
} catch (Exception e) { } catch (Exception e) {
try { try {

View File

@ -29,6 +29,7 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
@ -308,12 +309,7 @@ public class QueryShardContext extends QueryRewriteContext {
}); });
} }
@FunctionalInterface private ParsedQuery toQuery(QueryBuilder queryBuilder, CheckedFunction<QueryBuilder, Query, IOException> filterOrQuery) {
private interface CheckedFunction<T, R> {
R apply(T t) throws IOException;
}
private ParsedQuery toQuery(QueryBuilder queryBuilder, CheckedFunction<QueryBuilder, Query> filterOrQuery) {
reset(); reset();
try { try {
QueryBuilder rewriteQuery = QueryBuilder.rewriteQuery(queryBuilder, this); QueryBuilder rewriteQuery = QueryBuilder.rewriteQuery(queryBuilder, this);
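The private two-parameter CheckedFunction is replaced here by the shared org.elasticsearch.common.CheckedFunction, which carries the thrown exception type as a third type parameter. A sketch of such an interface under a hypothetical name, showing how the caller pins the exception type:

---------------------------------------------------------------------------
import java.io.IOException;

// A hypothetical name for the same shape as the shared CheckedFunction:
// the exception type is a generic parameter instead of being fixed.
@FunctionalInterface
interface ThrowingFunction<T, R, E extends Exception> {
    R apply(T t) throws E;
}

public class ThrowingFunctionDemo {
    public static void main(String[] args) throws IOException {
        // the caller pins E to IOException, so apply() declares exactly that
        ThrowingFunction<String, Integer, IOException> parse = s -> {
            if (s.isEmpty()) {
                throw new IOException("empty input");
            }
            return Integer.parseInt(s);
        };
        System.out.println(parse.apply("42")); // 42
    }
}
---------------------------------------------------------------------------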

View File

@ -19,7 +19,6 @@
package org.elasticsearch.index.query.functionscore; package org.elasticsearch.index.query.functionscore;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParseContext;
import java.io.IOException; import java.io.IOException;
@ -29,5 +28,5 @@ import java.io.IOException;
*/ */
@FunctionalInterface @FunctionalInterface
public interface ScoreFunctionParser<FB extends ScoreFunctionBuilder<FB>> { public interface ScoreFunctionParser<FB extends ScoreFunctionBuilder<FB>> {
FB fromXContent(QueryParseContext context) throws IOException, ParsingException; FB fromXContent(QueryParseContext context) throws IOException;
} }
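ParsingException extends ElasticsearchException, a RuntimeException, so listing it in the throws clause added nothing; only checked exceptions must be declared. A small reminder sketch with a hypothetical parser interface:

---------------------------------------------------------------------------
import java.io.IOException;

// Hypothetical parser interface; only checked exceptions must be declared.
@FunctionalInterface
interface MiniParser<T> {
    T fromXContent(String content) throws IOException;
}

public class MiniParserDemo {
    public static void main(String[] args) throws IOException {
        MiniParser<Integer> parser = content -> {
            if (content == null) {
                // unchecked exceptions (like ParsingException) need no throws clause
                throw new IllegalArgumentException("no content");
            }
            return Integer.parseInt(content);
        };
        System.out.println(parser.fromXContent("7")); // 7
    }
}
---------------------------------------------------------------------------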

View File

@ -75,7 +75,6 @@ import org.elasticsearch.index.cache.request.ShardRequestCache;
import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineFactory;
@ -622,7 +621,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
} }
/** /**
* Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link EngineClosedException}. * Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link AlreadyClosedException}.
*/ */
public void refresh(String source) { public void refresh(String source) {
verifyNotClosed(); verifyNotClosed();
@ -1265,7 +1264,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
try { try {
Translog translog = engine.getTranslog(); Translog translog = engine.getTranslog();
return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().getBytes(); return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().getBytes();
} catch (AlreadyClosedException | EngineClosedException ex) { } catch (AlreadyClosedException ex) {
// that's fine we are already closed - no need to flush // that's fine we are already closed - no need to flush
} }
} }
@ -1304,7 +1303,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void activateThrottling() { public void activateThrottling() {
try { try {
getEngine().activateThrottling(); getEngine().activateThrottling();
} catch (EngineClosedException ex) { } catch (AlreadyClosedException ex) {
// ignore // ignore
} }
} }
@ -1312,13 +1311,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void deactivateThrottling() { public void deactivateThrottling() {
try { try {
getEngine().deactivateThrottling(); getEngine().deactivateThrottling();
} catch (EngineClosedException ex) { } catch (AlreadyClosedException ex) {
// ignore // ignore
} }
} }
private void handleRefreshException(Exception e) { private void handleRefreshException(Exception e) {
if (e instanceof EngineClosedException) { if (e instanceof AlreadyClosedException) {
// ignore // ignore
} else if (e instanceof RefreshFailedEngineException) { } else if (e instanceof RefreshFailedEngineException) {
RefreshFailedEngineException rfee = (RefreshFailedEngineException) e; RefreshFailedEngineException rfee = (RefreshFailedEngineException) e;
@ -1530,7 +1529,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
Engine getEngine() { Engine getEngine() {
Engine engine = getEngineOrNull(); Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
throw new EngineClosedException(shardId); throw new AlreadyClosedException("engine is closed");
} }
return engine; return engine;
} }
@ -1667,7 +1666,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private Engine createNewEngine(EngineConfig config) { private Engine createNewEngine(EngineConfig config) {
synchronized (mutex) { synchronized (mutex) {
if (state == IndexShardState.CLOSED) { if (state == IndexShardState.CLOSED) {
throw new EngineClosedException(shardId); throw new AlreadyClosedException(shardId + " can't create engine - shard is closed");
} }
assert this.currentEngineReference.get() == null; assert this.currentEngineReference.get() == null;
Engine engine = newEngine(config); Engine engine = newEngine(config);
@ -1769,7 +1768,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
try { try {
final Engine engine = getEngine(); final Engine engine = getEngine();
engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1)); engine.getTranslog().ensureSynced(candidates.stream().map(Tuple::v1));
} catch (EngineClosedException ex) { } catch (AlreadyClosedException ex) {
// that's fine since we already synced everything on engine close - this also conforms with the method's // that's fine since we already synced everything on engine close - this also conforms with the method's
// documentation // documentation
} catch (IOException ex) { // if this fails we are in deep shit - fail the request } catch (IOException ex) { // if this fails we are in deep shit - fail the request
@ -1884,8 +1883,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* refresh listeners. * refresh listeners.
* Otherwise <code>false</code>. * Otherwise <code>false</code>.
* *
* @throws EngineClosedException if the engine is already closed * @throws AlreadyClosedException if the engine or the internal IndexWriter in the engine is already closed
* @throws AlreadyClosedException if the internal indexwriter in the engine is already closed
*/ */
public boolean isRefreshNeeded() { public boolean isRefreshNeeded() {
return getEngine().refreshNeeded() || (refreshListeners != null && refreshListeners.refreshNeeded()); return getEngine().refreshNeeded() || (refreshListeners != null && refreshListeners.refreshNeeded());

View File

@ -21,10 +21,6 @@ package org.elasticsearch.index.translog;
import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.MultiCommand;
import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
/** /**
* Class encapsulating and dispatching commands from the {@code elasticsearch-translog} command line tool * Class encapsulating and dispatching commands from the {@code elasticsearch-translog} command line tool

View File

@ -21,6 +21,7 @@ package org.elasticsearch.indices;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Setting.Property;
@ -30,7 +31,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingOperationListener;
@ -384,7 +384,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index
protected void checkIdle(IndexShard shard, long inactiveTimeNS) { protected void checkIdle(IndexShard shard, long inactiveTimeNS) {
try { try {
shard.checkIdle(inactiveTimeNS); shard.checkIdle(inactiveTimeNS);
} catch (EngineClosedException e) { } catch (AlreadyClosedException e) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); logger.trace((Supplier<?>) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e);
} }
} }

View File

@ -42,6 +42,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
@ -87,6 +88,7 @@ import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
@ -1251,7 +1253,7 @@ public class IndicesService extends AbstractLifecycleComponent
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) { public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
/* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
* of dependencies we pass in a function that can perform the parsing. */ * of dependencies we pass in a function that can perform the parsing. */
ShardSearchRequest.FilterParser filterParser = bytes -> { CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) { try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) {
return new QueryParseContext(parser).parseInnerQueryBuilder(); return new QueryParseContext(parser).parseInnerQueryBuilder();
} }

View File

@ -17,7 +17,7 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.node.internal; package org.elasticsearch.node;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
@ -106,7 +106,8 @@ public class InternalSettingsPreparer {
} }
} }
if (foundSuffixes.size() > 1) { if (foundSuffixes.size() > 1) {
throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ",")); throw new SettingsException("multiple settings files found with suffixes: "
+ Strings.collectionToDelimitedString(foundSuffixes, ","));
} }
// re-initialize settings now that the config file has been loaded // re-initialize settings now that the config file has been loaded
@ -195,7 +196,9 @@ public class InternalSettingsPreparer {
private static String promptForValue(String key, Terminal terminal, boolean secret) { private static String promptForValue(String key, Terminal terminal, boolean secret) {
if (terminal == null) { if (terminal == null) {
throw new UnsupportedOperationException("found property [" + key + "] with value [" + (secret ? SECRET_PROMPT_VALUE : TEXT_PROMPT_VALUE) +"]. prompting for property values is only supported when running elasticsearch in the foreground"); throw new UnsupportedOperationException("found property [" + key + "] with value ["
+ (secret ? SECRET_PROMPT_VALUE : TEXT_PROMPT_VALUE)
+ "]. prompting for property values is only supported when running elasticsearch in the foreground");
} }
if (secret) { if (secret) {

View File

@ -84,7 +84,6 @@ import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesModule;
@ -101,8 +100,6 @@ import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.ClusterPlugin;
@ -117,6 +114,7 @@ import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchModule;
@ -342,14 +340,18 @@ public class Node implements Closeable {
modules.add(clusterModule); modules.add(clusterModule);
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
modules.add(indicesModule); modules.add(indicesModule);
SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class)); SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(),
settingsModule.getClusterSettings(), threadPool, pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);
modules.add(new GatewayModule());
CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(), CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings()); settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService); resourcesToClose.add(circuitBreakerService);
ActionModule actionModule = new ActionModule(false, settings, clusterModule.getIndexNameExpressionResolver(),
settingsModule.getClusterSettings(), threadPool, pluginsService.filterPlugins(ActionPlugin.class), client,
circuitBreakerService);
modules.add(actionModule);
modules.add(new GatewayModule());
BigArrays bigArrays = createBigArrays(settings, circuitBreakerService); BigArrays bigArrays = createBigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays); resourcesToClose.add(bigArrays);
modules.add(settingsModule); modules.add(settingsModule);
@ -388,30 +390,35 @@ public class Node implements Closeable {
pluginsService.filterPlugins(Plugin.class).stream() pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getCustomMetaDataUpgrader) .map(Plugin::getCustomMetaDataUpgrader)
.collect(Collectors.toList()); .collect(Collectors.toList());
final RestController restController = actionModule.getRestController();
final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class), final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class),
threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService); threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService,
restController::dispatchRequest);
final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders); final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders);
final Transport transport = networkModule.getTransportSupplier().get(); final Transport transport = networkModule.getTransportSupplier().get();
final TransportService transportService = newTransportService(settings, transport, threadPool, final TransportService transportService = newTransportService(settings, transport, threadPool,
networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings()); networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings());
final Consumer<Binder> httpBind; final Consumer<Binder> httpBind;
final HttpServerTransport httpServerTransport;
if (networkModule.isHttpEnabled()) { if (networkModule.isHttpEnabled()) {
HttpServerTransport httpServerTransport = networkModule.getHttpServerTransportSupplier().get(); httpServerTransport = networkModule.getHttpServerTransportSupplier().get();
HttpServer httpServer = new HttpServer(settings, httpServerTransport, actionModule.getRestController(), client,
circuitBreakerService);
httpBind = b -> { httpBind = b -> {
b.bind(HttpServer.class).toInstance(httpServer);
b.bind(HttpServerTransport.class).toInstance(httpServerTransport); b.bind(HttpServerTransport.class).toInstance(httpServerTransport);
}; };
} else { } else {
httpBind = b -> { httpBind = b -> {
b.bind(HttpServer.class).toProvider(Providers.of(null)); b.bind(HttpServerTransport.class).toProvider(Providers.of(null));
}; };
httpServerTransport = null;
} }
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService,
namedWriteableRegistry, networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class)); namedWriteableRegistry, networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class));
NodeService nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter());
modules.add(b -> { modules.add(b -> {
b.bind(NodeService.class).toInstance(nodeService);
b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry); b.bind(NamedXContentRegistry.class).toInstance(xContentRegistry);
b.bind(PluginsService.class).toInstance(pluginsService); b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(Client.class).toInstance(client); b.bind(Client.class).toInstance(client);
@ -630,7 +637,7 @@ public class Node implements Closeable {
if (NetworkModule.HTTP_ENABLED.get(settings)) { if (NetworkModule.HTTP_ENABLED.get(settings)) {
injector.getInstance(HttpServer.class).start(); injector.getInstance(HttpServerTransport.class).start();
} }
// start nodes now, after the http server, because it may take some time // start nodes now, after the http server, because it may take some time
@ -663,7 +670,7 @@ public class Node implements Closeable {
injector.getInstance(TribeService.class).stop(); injector.getInstance(TribeService.class).stop();
injector.getInstance(ResourceWatcherService.class).stop(); injector.getInstance(ResourceWatcherService.class).stop();
if (NetworkModule.HTTP_ENABLED.get(settings)) { if (NetworkModule.HTTP_ENABLED.get(settings)) {
injector.getInstance(HttpServer.class).stop(); injector.getInstance(HttpServerTransport.class).stop();
} }
injector.getInstance(SnapshotsService.class).stop(); injector.getInstance(SnapshotsService.class).stop();
@ -714,7 +721,7 @@ public class Node implements Closeable {
toClose.add(injector.getInstance(NodeService.class)); toClose.add(injector.getInstance(NodeService.class));
toClose.add(() -> stopWatch.stop().start("http")); toClose.add(() -> stopWatch.stop().start("http"));
if (NetworkModule.HTTP_ENABLED.get(settings)) { if (NetworkModule.HTTP_ENABLED.get(settings)) {
toClose.add(injector.getInstance(HttpServer.class)); toClose.add(injector.getInstance(HttpServerTransport.class));
} }
toClose.add(() -> stopWatch.stop().start("snapshot_service")); toClose.add(() -> stopWatch.stop().start("snapshot_service"));
toClose.add(injector.getInstance(SnapshotsService.class)); toClose.add(injector.getInstance(SnapshotsService.class));

View File

@ -22,7 +22,6 @@ package org.elasticsearch.node;
import org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitor; import org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitor;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.service.NodeService;
public class NodeModule extends AbstractModule { public class NodeModule extends AbstractModule {
@ -38,7 +37,6 @@ public class NodeModule extends AbstractModule {
protected void configure() { protected void configure() {
bind(Node.class).toInstance(node); bind(Node.class).toInstance(node);
bind(MonitorService.class).toInstance(monitorService); bind(MonitorService.class).toInstance(monitorService);
bind(NodeService.class).asEagerSingleton();
bind(DiskThresholdMonitor.class).asEagerSingleton(); bind(DiskThresholdMonitor.class).asEagerSingleton();
} }
} }

View File

@ -17,7 +17,7 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.node.service; package org.elasticsearch.node;
import org.elasticsearch.Build; import org.elasticsearch.Build;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@ -27,11 +27,10 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.http.HttpServer; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.IngestService;
@ -55,17 +54,16 @@ public class NodeService extends AbstractComponent implements Closeable {
private final IngestService ingestService; private final IngestService ingestService;
private final SettingsFilter settingsFilter; private final SettingsFilter settingsFilter;
private ScriptService scriptService; private ScriptService scriptService;
private final HttpServerTransport httpServerTransport;
@Nullable
private final HttpServer httpServer;
private final Discovery discovery; private final Discovery discovery;
@Inject NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService, Discovery discovery,
public NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService, Discovery discovery,
TransportService transportService, IndicesService indicesService, PluginsService pluginService, TransportService transportService, IndicesService indicesService, PluginsService pluginService,
CircuitBreakerService circuitBreakerService, ScriptService scriptService, @Nullable HttpServer httpServer, CircuitBreakerService circuitBreakerService, ScriptService scriptService,
IngestService ingestService, ClusterService clusterService, SettingsFilter settingsFilter) { @Nullable HttpServerTransport httpServerTransport, IngestService ingestService, ClusterService clusterService,
SettingsFilter settingsFilter) {
super(settings); super(settings);
this.threadPool = threadPool; this.threadPool = threadPool;
this.monitorService = monitorService; this.monitorService = monitorService;
@ -74,7 +72,7 @@ public class NodeService extends AbstractComponent implements Closeable {
this.discovery = discovery; this.discovery = discovery;
this.pluginService = pluginService; this.pluginService = pluginService;
this.circuitBreakerService = circuitBreakerService; this.circuitBreakerService = circuitBreakerService;
this.httpServer = httpServer; this.httpServerTransport = httpServerTransport;
this.ingestService = ingestService; this.ingestService = ingestService;
this.settingsFilter = settingsFilter; this.settingsFilter = settingsFilter;
this.scriptService = scriptService; this.scriptService = scriptService;
@ -91,7 +89,7 @@ public class NodeService extends AbstractComponent implements Closeable {
jvm ? monitorService.jvmService().info() : null, jvm ? monitorService.jvmService().info() : null,
threadPool ? this.threadPool.info() : null, threadPool ? this.threadPool.info() : null,
transport ? transportService.info() : null, transport ? transportService.info() : null,
http ? (httpServer == null ? null : httpServer.info()) : null, http ? (httpServerTransport == null ? null : httpServerTransport.info()) : null,
plugin ? (pluginService == null ? null : pluginService.info()) : null, plugin ? (pluginService == null ? null : pluginService.info()) : null,
ingest ? (ingestService == null ? null : ingestService.info()) : null, ingest ? (ingestService == null ? null : ingestService.info()) : null,
indices ? indicesService.getTotalIndexingBufferBytes() : null indices ? indicesService.getTotalIndexingBufferBytes() : null
@ -111,7 +109,7 @@ public class NodeService extends AbstractComponent implements Closeable {
threadPool ? this.threadPool.stats() : null, threadPool ? this.threadPool.stats() : null,
fs ? monitorService.fsService().stats() : null, fs ? monitorService.fsService().stats() : null,
transport ? transportService.stats() : null, transport ? transportService.stats() : null,
http ? (httpServer == null ? null : httpServer.stats()) : null, http ? (httpServerTransport == null ? null : httpServerTransport.stats()) : null,
circuitBreaker ? circuitBreakerService.stats() : null, circuitBreaker ? circuitBreakerService.stats() : null,
script ? scriptService.stats() : null, script ? scriptService.stats() : null,
discoveryStats ? discovery.stats() : null, discoveryStats ? discovery.stats() : null,
View File
@ -33,9 +33,7 @@ import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.BufferedReader; import java.io.BufferedReader;
import java.io.IOException; import java.io.IOException;
@ -63,7 +61,6 @@ import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Locale; import java.util.Locale;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
View File
@ -22,9 +22,7 @@ package org.elasticsearch.plugins;
import joptsimple.OptionSet; import joptsimple.OptionSet;
import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.IOException; import java.io.IOException;
import java.nio.file.DirectoryStream; import java.nio.file.DirectoryStream;
@ -33,7 +31,6 @@ import java.nio.file.Path;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map;
/** /**
* A command for the plugin cli to list plugins installed in elasticsearch. * A command for the plugin cli to list plugins installed in elasticsearch.
View File
@ -69,8 +69,11 @@ public interface NetworkPlugin {
* See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. * See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
*/ */
default Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, default Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService,
NamedXContentRegistry xContentRegistry, NetworkService networkService) { NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry,
NetworkService networkService,
HttpServerTransport.Dispatcher dispatcher) {
return Collections.emptyMap(); return Collections.emptyMap();
} }
} }
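Note: plugins that supply a custom HTTP transport now receive the dispatcher and are expected to feed incoming requests to it, instead of wiring up an HttpServerAdapter. A minimal sketch of the new override, assuming a hypothetical MyHttpTransport implementation (not part of this change):
---------------------------------------------------------------------------
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;

public class MyNetworkPlugin extends Plugin implements NetworkPlugin {
    @Override
    public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
                                                                        BigArrays bigArrays,
                                                                        CircuitBreakerService circuitBreakerService,
                                                                        NamedWriteableRegistry namedWriteableRegistry,
                                                                        NamedXContentRegistry xContentRegistry,
                                                                        NetworkService networkService,
                                                                        HttpServerTransport.Dispatcher dispatcher) {
        // MyHttpTransport is hypothetical; a real implementation would hand every
        // incoming request to the dispatcher rather than to a HttpServerAdapter.
        return Collections.singletonMap("my-http",
                () -> new MyHttpTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher));
    }
}
---------------------------------------------------------------------------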
View File
@ -22,6 +22,7 @@ package org.elasticsearch.rest;
import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
@ -125,14 +126,7 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH
* the request against a channel. * the request against a channel.
*/ */
@FunctionalInterface @FunctionalInterface
protected interface RestChannelConsumer { protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception> {
/**
* Executes a request against the given channel.
*
* @param channel the channel for sending the response
* @throws Exception if an exception occurred executing the request
*/
void accept(RestChannel channel) throws Exception;
} }
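Note: since CheckedConsumer<RestChannel, Exception> has the same single abstract method shape as the old inline declaration, existing handlers keep compiling unchanged. A minimal sketch with a hypothetical handler, shown only to illustrate the unchanged "channel -> ..." lambda:
---------------------------------------------------------------------------
import java.io.IOException;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus;

// RestPingAction is a hypothetical handler, not part of this commit.
public class RestPingAction extends BaseRestHandler {
    public RestPingAction(Settings settings) {
        super(settings);
    }

    @Override
    public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
        // the lambda may still throw Exception; that now comes from CheckedConsumer
        return channel -> channel.sendResponse(
                new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
    }
}
---------------------------------------------------------------------------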
/** /**
View File
@ -19,23 +19,35 @@
package org.elasticsearch.rest; package org.elasticsearch.rest;
import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream;
import java.util.Objects; import java.util.Objects;
import java.util.Set; import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.UnaryOperator; import java.util.function.UnaryOperator;
import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier; import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.path.PathTrie; import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.FORBIDDEN;
import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR;
import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.OK;
public class RestController extends AbstractComponent { public class RestController extends AbstractComponent {
@ -48,18 +60,27 @@ public class RestController extends AbstractComponent {
private final UnaryOperator<RestHandler> handlerWrapper; private final UnaryOperator<RestHandler> handlerWrapper;
private final NodeClient client;
private final CircuitBreakerService circuitBreakerService;
/** Rest headers that are copied to internal requests made during a rest request. */ /** Rest headers that are copied to internal requests made during a rest request. */
private final Set<String> headersToCopy; private final Set<String> headersToCopy;
public RestController(Settings settings, Set<String> headersToCopy, UnaryOperator<RestHandler> handlerWrapper) { public RestController(Settings settings, Set<String> headersToCopy, UnaryOperator<RestHandler> handlerWrapper,
NodeClient client, CircuitBreakerService circuitBreakerService) {
super(settings); super(settings);
this.headersToCopy = headersToCopy; this.headersToCopy = headersToCopy;
if (handlerWrapper == null) { if (handlerWrapper == null) {
handlerWrapper = h -> h; // passthrough if no wrapper set handlerWrapper = h -> h; // passthrough if no wrapper set
} }
this.handlerWrapper = handlerWrapper; this.handlerWrapper = handlerWrapper;
this.client = client;
this.circuitBreakerService = circuitBreakerService;
} }
/** /**
* Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request. * Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request.
* *
@ -137,7 +158,34 @@ public class RestController extends AbstractComponent {
return (handler != null) ? handler.canTripCircuitBreaker() : true; return (handler != null) ? handler.canTripCircuitBreaker() : true;
} }
public void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext) throws Exception { public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) {
if (request.rawPath().equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
RestChannel responseChannel = channel;
try {
int contentLength = request.content().length();
if (canTripCircuitBreaker(request)) {
inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, "<http_request>");
} else {
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(contentLength);
}
// if (and only if) we could reserve bytes for the request, the response must also be sent over this channel
responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength);
dispatchRequest(request, responseChannel, client, threadContext);
} catch (Exception e) {
try {
responseChannel.sendResponse(new BytesRestResponse(channel, e));
} catch (Exception inner) {
inner.addSuppressed(e);
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner);
}
}
}
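Note: the in-flight accounting that used to live in HttpServer now happens here: the request body size is reserved up front and released exactly once when the wrapped channel sends a response. An illustrative sketch of that contract (not the actual controller code):
---------------------------------------------------------------------------
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.rest.RestRequest;

final class InFlightAccountingSketch {
    static void withAccounting(CircuitBreakerService breakers, RestRequest request, Runnable handle) {
        CircuitBreaker breaker = breakers.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
        int contentLength = request.content().length();
        // may throw CircuitBreakingException, which surfaces to the client as 503
        breaker.addEstimateBytesAndMaybeBreak(contentLength, "<http_request>");
        try {
            handle.run();
        } finally {
            // the real code defers this release to ResourceHandlingHttpChannel.sendResponse()
            // so the bytes stay reserved until the response actually goes out
            breaker.addWithoutBreaking(-contentLength);
        }
    }
}
---------------------------------------------------------------------------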
void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext) throws Exception {
if (!checkRequestParameters(request, channel)) { if (!checkRequestParameters(request, channel)) {
return; return;
} }
@ -223,4 +271,84 @@ public class RestController extends AbstractComponent {
// my_index/my_type/http%3A%2F%2Fwww.google.com // my_index/my_type/http%3A%2F%2Fwww.google.com
return request.rawPath(); return request.rawPath();
} }
void handleFavicon(RestRequest request, RestChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
Streams.copy(stream, out);
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", out.toByteArray());
channel.sendResponse(restResponse);
}
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
} else {
channel.sendResponse(new BytesRestResponse(FORBIDDEN, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY));
}
}
private static final class ResourceHandlingHttpChannel implements RestChannel {
private final RestChannel delegate;
private final CircuitBreakerService circuitBreakerService;
private final int contentLength;
private final AtomicBoolean closed = new AtomicBoolean();
public ResourceHandlingHttpChannel(RestChannel delegate, CircuitBreakerService circuitBreakerService, int contentLength) {
this.delegate = delegate;
this.circuitBreakerService = circuitBreakerService;
this.contentLength = contentLength;
}
@Override
public XContentBuilder newBuilder() throws IOException {
return delegate.newBuilder();
}
@Override
public XContentBuilder newErrorBuilder() throws IOException {
return delegate.newErrorBuilder();
}
@Override
public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException {
return delegate.newBuilder(autoDetectSource, useFiltering);
}
@Override
public BytesStreamOutput bytesOutput() {
return delegate.bytesOutput();
}
@Override
public RestRequest request() {
return delegate.request();
}
@Override
public boolean detailedErrorsEnabled() {
return delegate.detailedErrorsEnabled();
}
@Override
public void sendResponse(RestResponse response) {
close();
delegate.sendResponse(response);
}
private void close() {
// attempt to close once atomically
if (closed.compareAndSet(false, true) == false) {
throw new IllegalStateException("Channel is already closed");
}
inFlightRequestsBreaker(circuitBreakerService).addWithoutBreaking(-contentLength);
}
}
private static CircuitBreaker inFlightRequestsBreaker(CircuitBreakerService circuitBreakerService) {
// We always obtain a fresh breaker to reflect changes to the breaker configuration.
return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
}
} }
View File
@ -21,6 +21,7 @@ package org.elasticsearch.search.internal;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
@ -66,7 +67,7 @@ public final class AliasFilter implements Writeable {
final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData(); final IndexMetaData indexMetaData = context.getIndexSettings().getIndexMetaData();
/* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
* of dependencies we pass in a function that can perform the parsing. */ * of dependencies we pass in a function that can perform the parsing. */
ShardSearchRequest.FilterParser filterParser = bytes -> { CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
try (XContentParser parser = XContentFactory.xContent(bytes).createParser(context.getXContentRegistry(), bytes)) { try (XContentParser parser = XContentFactory.xContent(bytes).createParser(context.getXContentRegistry(), bytes)) {
return context.newParseContext(parser).parseInnerQueryBuilder(); return context.newParseContext(parser).parseInnerQueryBuilder();
} }
View File
@ -22,6 +22,7 @@ package org.elasticsearch.search.internal;
import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
@ -88,17 +89,13 @@ public interface ShardSearchRequest {
*/ */
void rewrite(QueryShardContext context) throws IOException; void rewrite(QueryShardContext context) throws IOException;
@FunctionalInterface
public interface FilterParser {
QueryBuilder parse(byte[] bytes) throws IOException;
}
/** /**
* Returns the filter associated with listed filtering aliases. * Returns the filter associated with listed filtering aliases.
* <p> * <p>
* The list of filtering aliases should be obtained by calling MetaData.filteringAliases. * The list of filtering aliases should be obtained by calling MetaData.filteringAliases.
* Returns <tt>null</tt> if no filtering is required.</p> * Returns <tt>null</tt> if no filtering is required.</p>
*/ */
static QueryBuilder parseAliasFilter(FilterParser filterParser, static QueryBuilder parseAliasFilter(CheckedFunction<byte[], QueryBuilder, IOException> filterParser,
IndexMetaData metaData, String... aliasNames) { IndexMetaData metaData, String... aliasNames) {
if (aliasNames == null || aliasNames.length == 0) { if (aliasNames == null || aliasNames.length == 0) {
return null; return null;
@ -110,7 +107,7 @@ public interface ShardSearchRequest {
return null; return null;
} }
try { try {
return filterParser.parse(alias.filter().uncompressed()); return filterParser.apply(alias.filter().uncompressed());
} catch (IOException ex) { } catch (IOException ex) {
throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex); throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex);
} }
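Note: with the bespoke FilterParser interface gone, any byte[] -> QueryBuilder lambda that may throw IOException fits. A minimal caller sketch ("my_alias" is a made-up alias name):
---------------------------------------------------------------------------
import java.io.IOException;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.internal.ShardSearchRequest;

final class AliasFilterSketch {
    static QueryBuilder resolve(IndexMetaData metaData,
                                CheckedFunction<byte[], QueryBuilder, IOException> parser) {
        // CheckedFunction.apply(...) replaces the old FilterParser.parse(...)
        return ShardSearchRequest.parseAliasFilter(parser, metaData, "my_alias");
    }
}
---------------------------------------------------------------------------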
View File
@ -31,8 +31,8 @@ import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit;
/** /**
* This class is the internal representation of a profiled Query, corresponding * This class is the internal representation of a profiled Query, corresponding
@ -48,6 +48,7 @@ public final class ProfileResult implements Writeable, ToXContent {
private static final ParseField TYPE = new ParseField("type"); private static final ParseField TYPE = new ParseField("type");
private static final ParseField DESCRIPTION = new ParseField("description"); private static final ParseField DESCRIPTION = new ParseField("description");
private static final ParseField NODE_TIME = new ParseField("time"); private static final ParseField NODE_TIME = new ParseField("time");
private static final ParseField NODE_TIME_RAW = new ParseField("time_in_nanos");
private static final ParseField CHILDREN = new ParseField("children"); private static final ParseField CHILDREN = new ParseField("children");
private static final ParseField BREAKDOWN = new ParseField("breakdown"); private static final ParseField BREAKDOWN = new ParseField("breakdown");
@ -146,7 +147,7 @@ public final class ProfileResult implements Writeable, ToXContent {
builder = builder.startObject() builder = builder.startObject()
.field(TYPE.getPreferredName(), type) .field(TYPE.getPreferredName(), type)
.field(DESCRIPTION.getPreferredName(), description) .field(DESCRIPTION.getPreferredName(), description)
.field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", getTime() / 1000000.0)) .timeValueField(NODE_TIME_RAW.getPreferredName(), NODE_TIME.getPreferredName(), getTime(), TimeUnit.NANOSECONDS)
.field(BREAKDOWN.getPreferredName(), timings); .field(BREAKDOWN.getPreferredName(), timings);
if (!children.isEmpty()) { if (!children.isEmpty()) {
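Note: timeValueField always writes the raw nanosecond field and adds the human-readable variant only when the builder is marked human readable; CollectorResult below gets the same treatment. A small sketch of the resulting output (field order and rounding are approximate):
---------------------------------------------------------------------------
import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

final class ProfileTimeSketch {
    static String render() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().humanReadable(true);
        builder.startObject()
               .timeValueField("time_in_nanos", "time", 1234567L, TimeUnit.NANOSECONDS)
               .endObject();
        return builder.string(); // roughly {"time":"1.2ms","time_in_nanos":1234567}
    }
}
---------------------------------------------------------------------------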
View File
@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Locale; import java.util.concurrent.TimeUnit;
/** /**
* Public interface and serialization container for profiled timings of the * Public interface and serialization container for profiled timings of the
@ -52,6 +52,7 @@ public class CollectorResult implements ToXContent, Writeable {
private static final ParseField NAME = new ParseField("name"); private static final ParseField NAME = new ParseField("name");
private static final ParseField REASON = new ParseField("reason"); private static final ParseField REASON = new ParseField("reason");
private static final ParseField TIME = new ParseField("time"); private static final ParseField TIME = new ParseField("time");
private static final ParseField TIME_NANOS = new ParseField("time_in_nanos");
private static final ParseField CHILDREN = new ParseField("children"); private static final ParseField CHILDREN = new ParseField("children");
/** /**
@ -140,7 +141,7 @@ public class CollectorResult implements ToXContent, Writeable {
builder = builder.startObject() builder = builder.startObject()
.field(NAME.getPreferredName(), getName()) .field(NAME.getPreferredName(), getName())
.field(REASON.getPreferredName(), getReason()) .field(REASON.getPreferredName(), getReason())
.field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0))); .timeValueField(TIME_NANOS.getPreferredName(), TIME.getPreferredName(), getTime(), TimeUnit.NANOSECONDS);
if (!children.isEmpty()) { if (!children.isEmpty()) {
builder = builder.startArray(CHILDREN.getPreferredName()); builder = builder.startArray(CHILDREN.getPreferredName());
View File
@ -112,8 +112,6 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent implements Transport { public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent implements Transport {
public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker"; public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker";
public static final String TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX = "transport_server_boss";
public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker";
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss"; public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
// the scheduled internal ping interval setting, defaults to disabled (-1) // the scheduled internal ping interval setting, defaults to disabled (-1)
@ -137,10 +135,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
boolSetting("transport.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Setting.Property.NodeScope); boolSetting("transport.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Setting.Property.NodeScope);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = public static final Setting<Boolean> TCP_REUSE_ADDRESS =
boolSetting("transport.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Setting.Property.NodeScope); boolSetting("transport.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Setting.Property.NodeScope);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
boolSetting("transport.tcp.blocking_client", NetworkService.TcpSettings.TCP_BLOCKING_CLIENT, Setting.Property.NodeScope);
public static final Setting<Boolean> TCP_BLOCKING_SERVER =
boolSetting("transport.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Setting.Property.NodeScope);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
Setting.Property.NodeScope); Setting.Property.NodeScope);
@ -150,7 +144,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9);
private static final int PING_DATA_SIZE = -1; private static final int PING_DATA_SIZE = -1;
protected final boolean blockingClient;
private final CircuitBreakerService circuitBreakerService; private final CircuitBreakerService circuitBreakerService;
// package visibility for tests // package visibility for tests
protected final ScheduledPing scheduledPing; protected final ScheduledPing scheduledPing;
@ -194,7 +187,6 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);
this.networkService = networkService; this.networkService = networkService;
this.transportName = transportName; this.transportName = transportName;
this.blockingClient = TCP_BLOCKING_CLIENT.get(settings);
defaultConnectionProfile = buildDefaultConnectionProfile(settings); defaultConnectionProfile = buildDefaultConnectionProfile(settings);
} }
View File
@ -37,11 +37,8 @@ public enum Transports {
public static final boolean isTransportThread(Thread t) { public static final boolean isTransportThread(Thread t) {
final String threadName = t.getName(); final String threadName = t.getName();
for (String s : Arrays.asList( for (String s : Arrays.asList(
HttpServerTransport.HTTP_SERVER_BOSS_THREAD_NAME_PREFIX,
HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_SERVER_BOSS_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX,
TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX,
TEST_MOCK_TRANSPORT_THREAD_PREFIX)) { TEST_MOCK_TRANSPORT_THREAD_PREFIX)) {
if (threadName.contains(s)) { if (threadName.contains(s)) {
View File
@ -19,6 +19,7 @@
package org.elasticsearch.action.support.replication; package org.elasticsearch.action.support.replication;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.UnavailableShardsException;
@ -55,7 +56,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexShardClosedException;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
@ -431,12 +431,12 @@ public class TransportReplicationActionTests extends ESTestCase {
} }
} }
private ElasticsearchException randomRetryPrimaryException(ShardId shardId) { private Exception randomRetryPrimaryException(ShardId shardId) {
return randomFrom( return randomFrom(
new ShardNotFoundException(shardId), new ShardNotFoundException(shardId),
new IndexNotFoundException(shardId.getIndex()), new IndexNotFoundException(shardId.getIndex()),
new IndexShardClosedException(shardId), new IndexShardClosedException(shardId),
new EngineClosedException(shardId), new AlreadyClosedException(shardId + " primary is closed"),
new ReplicationOperation.RetryOnPrimaryException(shardId, "hello") new ReplicationOperation.RetryOnPrimaryException(shardId, "hello")
); );
} }
View File
@ -30,7 +30,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.http.HttpInfo; import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.HttpStats;
import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerService;
@ -89,8 +88,6 @@ public class NetworkModuleTests extends ModuleTestCase {
public HttpStats stats() { public HttpStats stats() {
return null; return null;
} }
@Override
public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {}
} }
@ -155,7 +152,8 @@ public class NetworkModuleTests extends ModuleTestCase {
CircuitBreakerService circuitBreakerService, CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry, NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry, NamedXContentRegistry xContentRegistry,
NetworkService networkService) { NetworkService networkService,
HttpServerTransport.Dispatcher requestDispatcher) {
return Collections.singletonMap("custom", custom); return Collections.singletonMap("custom", custom);
} }
}); });
@ -195,7 +193,8 @@ public class NetworkModuleTests extends ModuleTestCase {
CircuitBreakerService circuitBreakerService, CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry, NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry, NamedXContentRegistry xContentRegistry,
NetworkService networkService) { NetworkService networkService,
HttpServerTransport.Dispatcher requestDispatcher) {
Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>(); Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>();
supplierMap.put("custom", custom); supplierMap.put("custom", custom);
supplierMap.put("default_custom", def); supplierMap.put("default_custom", def);
@ -228,7 +227,8 @@ public class NetworkModuleTests extends ModuleTestCase {
CircuitBreakerService circuitBreakerService, CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry, NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry, NamedXContentRegistry xContentRegistry,
NetworkService networkService) { NetworkService networkService,
HttpServerTransport.Dispatcher requestDispatcher) {
Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>(); Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>();
supplierMap.put("custom", custom); supplierMap.put("custom", custom);
supplierMap.put("default_custom", def); supplierMap.put("default_custom", def);
@ -276,6 +276,7 @@ public class NetworkModuleTests extends ModuleTestCase {
} }
private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) { private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) {
return new NetworkModule(settings, transportClient, Arrays.asList(plugins), threadPool, null, null, null, xContentRegistry(), null); return new NetworkModule(settings, transportClient, Arrays.asList(plugins), threadPool, null, null, null, xContentRegistry(), null,
(a, b, c) -> {});
} }
} }
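Note: the trailing (a, b, c) -> {} lambda is a no-op HttpServerTransport.Dispatcher. Written out with its assumed parameter types, inferred from RestController.dispatchRequest(request, channel, threadContext):
---------------------------------------------------------------------------
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;

final class NoopDispatcherSketch {
    // parameter order assumed; it matches the (a, b, c) -> {} lambda in the test above
    static final HttpServerTransport.Dispatcher NOOP =
            (RestRequest request, RestChannel channel, ThreadContext threadContext) -> {
                // intentionally empty: the module test only exercises wiring, not request handling
            };
}
---------------------------------------------------------------------------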
View File
@ -1,231 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http;
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.rest.AbstractRestChannel;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
public class HttpServerTests extends ESTestCase {
private static final ByteSizeValue BREAKER_LIMIT = new ByteSizeValue(20);
private HttpServer httpServer;
private CircuitBreaker inFlightRequestsBreaker;
@Before
public void setup() {
Settings settings = Settings.EMPTY;
CircuitBreakerService circuitBreakerService = new HierarchyCircuitBreakerService(
Settings.builder()
.put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), BREAKER_LIMIT)
.build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
// we can do this here only because we know that we don't adjust breaker settings dynamically in the test
inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
HttpServerTransport httpServerTransport = new TestHttpServerTransport();
RestController restController = new RestController(settings, Collections.emptySet(), null);
restController.registerHandler(RestRequest.Method.GET, "/",
(request, channel, client) -> channel.sendResponse(
new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)));
restController.registerHandler(RestRequest.Method.GET, "/error", (request, channel, client) -> {
throw new IllegalArgumentException("test error");
});
httpServer = new HttpServer(settings, httpServerTransport, restController, null, circuitBreakerService);
httpServer.start();
}
public void testDispatchRequestAddsAndFreesBytesOnSuccess() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK);
httpServer.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestAddsAndFreesBytesOnError() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/error", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST);
httpServer.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
// we will produce an error in the rest handler and one more when sending the error response
TestRestRequest request = new TestRestRequest("/error", content);
ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true);
httpServer.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestLimitsBytes() {
int contentLength = BREAKER_LIMIT.bytesAsInt() + 1;
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.SERVICE_UNAVAILABLE);
httpServer.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(1, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements
HttpServerTransport {
public TestHttpServerTransport() {
super(Settings.EMPTY);
}
@Override
protected void doStart() {
}
@Override
protected void doStop() {
}
@Override
protected void doClose() {
}
@Override
public BoundTransportAddress boundAddress() {
TransportAddress transportAddress = buildNewFakeTransportAddress();
return new BoundTransportAddress(new TransportAddress[] {transportAddress} ,transportAddress);
}
@Override
public HttpInfo info() {
return null;
}
@Override
public HttpStats stats() {
return null;
}
@Override
public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {
}
}
private static final class AssertingChannel extends AbstractRestChannel {
private final RestStatus expectedStatus;
protected AssertingChannel(RestRequest request, boolean detailedErrorsEnabled, RestStatus expectedStatus) {
super(request, detailedErrorsEnabled);
this.expectedStatus = expectedStatus;
}
@Override
public void sendResponse(RestResponse response) {
assertEquals(expectedStatus, response.status());
}
}
private static final class ExceptionThrowingChannel extends AbstractRestChannel {
protected ExceptionThrowingChannel(RestRequest request, boolean detailedErrorsEnabled) {
super(request, detailedErrorsEnabled);
}
@Override
public void sendResponse(RestResponse response) {
throw new IllegalStateException("always throwing an exception for testing");
}
}
private static final class TestRestRequest extends RestRequest {
private final BytesReference content;
private TestRestRequest(String path, String content) {
super(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path);
this.content = new BytesArray(content);
}
@Override
public Method method() {
return Method.GET;
}
@Override
public String uri() {
return null;
}
@Override
public boolean hasContent() {
return true;
}
@Override
public BytesReference content() {
return content;
}
@Override
public String header(String name) {
return null;
}
@Override
public Iterable<Map.Entry<String, String>> headers() {
return null;
}
}
}
View File
@ -48,7 +48,9 @@ import org.elasticsearch.index.cache.query.IndexQueryCache;
import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.InternalEngineTests;
import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingOperationListener;
@ -247,7 +249,8 @@ public class IndexModuleTests extends ESTestCase {
assertEquals(IndexingSlowLog.class, indexService.getIndexOperationListeners().get(0).getClass()); assertEquals(IndexingSlowLog.class, indexService.getIndexOperationListeners().get(0).getClass());
assertSame(listener, indexService.getIndexOperationListeners().get(1)); assertSame(listener, indexService.getIndexOperationListeners().get(1));
Engine.Index index = new Engine.Index(new Term("_uid", "1"), null); ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null);
Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
ShardId shardId = new ShardId(new Index("foo", "bar"), 0); ShardId shardId = new ShardId(new Index("foo", "bar"), 0);
for (IndexingOperationListener l : indexService.getIndexOperationListeners()) { for (IndexingOperationListener l : indexService.getIndexOperationListeners()) {
l.preIndex(shardId, index); l.preIndex(shardId, index);
View File
@ -19,7 +19,6 @@
package org.elasticsearch.index; package org.elasticsearch.index;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse;
@ -34,7 +33,6 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
@ -58,9 +56,6 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
@ -91,7 +86,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
/** /**
* Tests for indices that use shadow replicas and a shared filesystem * Tests for indices that use shadow replicas and a shared filesystem
*/ */
@LuceneTestCase.AwaitsFix(bugUrl = "fix this fails intermittently")
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class IndexWithShadowReplicasIT extends ESIntegTestCase { public class IndexWithShadowReplicasIT extends ESIntegTestCase {
@ -459,7 +453,6 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
assertHitCount(resp, numPhase1Docs + numPhase2Docs); assertHitCount(resp, numPhase1Docs + numPhase2Docs);
} }
@AwaitsFix(bugUrl = "uncaught exception")
public void testPrimaryRelocationWhereRecoveryFails() throws Exception { public void testPrimaryRelocationWhereRecoveryFails() throws Exception {
Path dataPath = createTempDir(); Path dataPath = createTempDir();
Settings nodeSettings = Settings.builder() Settings nodeSettings = Settings.builder()
View File
@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import java.io.IOException;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
public class FlattenGraphTokenFilterFactoryTests extends ESTokenStreamTestCase {
public void testBasic() throws IOException {
Index index = new Index("test", "_na_");
String name = "ngr";
Settings indexSettings = newAnalysisSettingsBuilder().build();
IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
Settings settings = newAnalysisSettingsBuilder().build();
// "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
TokenStream in = new CannedTokenStream(0, 12, new Token[] {
token("wtf", 1, 5, 0, 3),
token("what", 0, 1, 0, 3),
token("wow", 0, 3, 0, 3),
token("the", 1, 1, 0, 3),
token("fudge", 1, 3, 0, 3),
token("that's", 1, 1, 0, 3),
token("funny", 1, 1, 0, 3),
token("happened", 1, 1, 4, 12)
});
TokenStream tokens = new FlattenGraphTokenFilterFactory(indexProperties, null, name, settings).create(in);
// ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
assertTokenStreamContents(tokens,
new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
new int[] {0, 0, 0, 0, 0, 0, 0, 4},
new int[] {3, 3, 3, 3, 3, 3, 3, 12},
new int[] {1, 0, 0, 1, 0, 1, 0, 1},
new int[] {3, 1, 1, 1, 1, 1, 1, 1},
12);
}
private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
final Token t = new Token(term, startOffset, endOffset);
t.setPositionIncrement(posInc);
t.setPositionLength(posLength);
return t;
}
}
View File
@ -33,6 +33,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
@ -44,21 +45,18 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.RefreshListeners; import org.elasticsearch.index.shard.RefreshListeners;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils; import org.elasticsearch.index.shard.ShardUtils;
@ -83,7 +81,6 @@ import java.util.List;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasKey;
@ -172,8 +169,8 @@ public class ShadowEngineTests extends ESTestCase {
} }
private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) { private ParsedDocument testParsedDocument(String id, String type, String routing, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) {
Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE); Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 0); Field versionField = new NumericDocValuesField("_version", 0);
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
document.add(uidField); document.add(uidField);
@ -254,8 +251,16 @@ public class ShadowEngineTests extends ESTestCase {
return config; return config;
} }
protected Term newUid(String id) {
return new Term("_uid", id);
}
protected Term newUid(ParsedDocument doc) {
return new Term("_uid", doc.uid());
}
private Engine.Index indexForDoc(ParsedDocument doc) {
return new Engine.Index(newUid(doc), doc);
} }
protected static final BytesReference B_1 = new BytesArray(new byte[]{1}); protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
@ -264,8 +269,8 @@ public class ShadowEngineTests extends ESTestCase {
public void testCommitStats() { public void testCommitStats() {
// create a doc and refresh // create a doc and refresh
ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null); ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.index(indexForDoc(doc));
CommitStats stats1 = replicaEngine.commitStats(); CommitStats stats1 = replicaEngine.commitStats();
assertThat(stats1.getGeneration(), greaterThan(0L)); assertThat(stats1.getGeneration(), greaterThan(0L));
@ -296,11 +301,11 @@ public class ShadowEngineTests extends ESTestCase {
assertThat(primaryEngine.segmentsStats(false).getMemoryInBytes(), equalTo(0L)); assertThat(primaryEngine.segmentsStats(false).getMemoryInBytes(), equalTo(0L));
// create a doc and refresh // create a doc and refresh
ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null); ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
primaryEngine.index(new Engine.Index(newUid("1"), doc)); primaryEngine.index(indexForDoc(doc));
ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, testDocumentWithTextField(), B_2, null); ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null);
primaryEngine.index(new Engine.Index(newUid("2"), doc2)); primaryEngine.index(indexForDoc(doc2));
primaryEngine.refresh("test"); primaryEngine.refresh("test");
segments = primaryEngine.segments(false); segments = primaryEngine.segments(false);
@ -358,8 +363,8 @@ public class ShadowEngineTests extends ESTestCase {
assertThat(segments.get(0).isCompound(), equalTo(true));
- ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, testDocumentWithTextField(), B_3, null);
+ ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null);
- primaryEngine.index(new Engine.Index(newUid("3"), doc3));
+ primaryEngine.index(indexForDoc(doc3));
primaryEngine.refresh("test");
segments = primaryEngine.segments(false);
@@ -408,7 +413,7 @@ public class ShadowEngineTests extends ESTestCase {
assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
assertThat(segments.get(1).isCompound(), equalTo(true));
- primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc)));
primaryEngine.refresh("test");
segments = primaryEngine.segments(false);
@@ -430,8 +435,8 @@ public class ShadowEngineTests extends ESTestCase {
primaryEngine.flush();
replicaEngine.refresh("test");
- ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, testDocumentWithTextField(), B_3, null);
+ ParsedDocument doc4 = testParsedDocument("4", "test", null, testDocumentWithTextField(), B_3, null);
- primaryEngine.index(new Engine.Index(newUid("4"), doc4));
+ primaryEngine.index(indexForDoc(doc4));
primaryEngine.refresh("test");
segments = primaryEngine.segments(false);
@@ -463,19 +468,19 @@ public class ShadowEngineTests extends ESTestCase {
List<Segment> segments = primaryEngine.segments(true);
assertThat(segments.isEmpty(), equalTo(true));
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
primaryEngine.refresh("test");
segments = primaryEngine.segments(true);
assertThat(segments.size(), equalTo(1));
assertThat(segments.get(0).ramTree, notNullValue());
- ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, testDocumentWithTextField(), B_2, null);
+ ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null);
- primaryEngine.index(new Engine.Index(newUid("2"), doc2));
+ primaryEngine.index(indexForDoc(doc2));
primaryEngine.refresh("test");
- ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, testDocumentWithTextField(), B_3, null);
+ ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null);
- primaryEngine.index(new Engine.Index(newUid("3"), doc3));
+ primaryEngine.index(indexForDoc(doc3));
primaryEngine.refresh("test");
segments = primaryEngine.segments(true);
@@ -500,9 +505,9 @@ public class ShadowEngineTests extends ESTestCase {
// create a document
ParseContext.Document document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null);
try {
- replicaEngine.index(new Engine.Index(newUid("1"), doc));
+ replicaEngine.index(indexForDoc(doc));
fail("should have thrown an exception");
} catch (UnsupportedOperationException e) {}
replicaEngine.refresh("test");
@@ -512,16 +517,16 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
searchResult.close();
- Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(false));
getResult.release();
// index a document
document = testDocument();
document.add(new TextField("value", "test1", Field.Store.YES));
- doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ doc = testParsedDocument("1", "test", null, document, B_1, null);
try {
- replicaEngine.index(new Engine.Index(newUid("1"), doc));
+ replicaEngine.index(indexForDoc(doc));
fail("should have thrown an exception");
} catch (UnsupportedOperationException e) {}
replicaEngine.refresh("test");
@@ -531,15 +536,15 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
searchResult.close();
- getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = replicaEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(false));
getResult.release();
// Now, add a document to the primary so we can test shadow engine deletes
document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
- doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ doc = testParsedDocument("1", "test", null, document, B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
primaryEngine.flush();
replicaEngine.refresh("test");
@@ -550,14 +555,14 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// And the replica can retrieve it
- getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
+ getResult = replicaEngine.get(new Engine.Get(false, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
// try to delete it on the replica
try {
- replicaEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ replicaEngine.delete(new Engine.Delete("test", "1", newUid(doc)));
fail("should have thrown an exception");
} catch (UnsupportedOperationException e) {}
replicaEngine.flush();
@@ -569,7 +574,7 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
- getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
+ getResult = replicaEngine.get(new Engine.Get(false, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -579,7 +584,7 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
- getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(false, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -593,8 +598,8 @@ public class ShadowEngineTests extends ESTestCase {
// create a document
ParseContext.Document document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
// its not there...
searchResult = primaryEngine.acquireSearcher("test");
@@ -609,18 +614,18 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// but, we can still get it (in realtime)
- Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
// can't get it from the replica, because it's not in the translog for a shadow replica
- getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = replicaEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(false));
getResult.release();
// but, not there non realtime
- getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(false, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
getResult.release();
@@ -631,7 +636,7 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// also in non realtime
- getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(false, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -646,8 +651,8 @@ public class ShadowEngineTests extends ESTestCase {
document = testDocument();
document.add(new TextField("value", "test1", Field.Store.YES));
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_2), SourceFieldMapper.Defaults.FIELD_TYPE));
- doc = testParsedDocument("1", "1", "test", null, document, B_2, null);
+ doc = testParsedDocument("1", "test", null, document, B_2, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
// its not updated yet...
searchResult = primaryEngine.acquireSearcher("test");
@@ -657,7 +662,7 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// but, we can still get it (in realtime)
- getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -690,7 +695,7 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// now delete
- primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc)));
// its not deleted yet
searchResult = primaryEngine.acquireSearcher("test");
@@ -700,7 +705,7 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// but, get should not see it (in realtime)
- getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(false));
getResult.release();
@@ -716,8 +721,8 @@ public class ShadowEngineTests extends ESTestCase {
// add it back
document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
- doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ doc = testParsedDocument("1", "test", null, document, B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
// its not there...
searchResult = primaryEngine.acquireSearcher("test");
@@ -740,7 +745,7 @@ public class ShadowEngineTests extends ESTestCase {
primaryEngine.flush();
// and, verify get (in real time)
- getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = primaryEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -752,7 +757,7 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
searchResult.close();
- getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ getResult = replicaEngine.get(new Engine.Get(true, newUid(doc)));
assertThat(getResult.exists(), equalTo(true));
assertThat(getResult.docIdAndVersion(), notNullValue());
getResult.release();
@@ -761,8 +766,8 @@ public class ShadowEngineTests extends ESTestCase {
// now do an update
document = testDocument();
document.add(new TextField("value", "test1", Field.Store.YES));
- doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ doc = testParsedDocument("1", "test", null, document, B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
// its not updated yet...
searchResult = primaryEngine.acquireSearcher("test");
@@ -797,8 +802,8 @@ public class ShadowEngineTests extends ESTestCase {
searchResult.close();
// create a document
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
// its not there...
searchResult = primaryEngine.acquireSearcher("test");
@@ -827,7 +832,7 @@ public class ShadowEngineTests extends ESTestCase {
// don't release the replica search result yet...
// delete, refresh and do a new search, it should not be there
- primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ primaryEngine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc)));
primaryEngine.flush();
primaryEngine.refresh("test");
replicaEngine.refresh("test");
@@ -842,8 +847,8 @@ public class ShadowEngineTests extends ESTestCase {
}
public void testFailEngineOnCorruption() {
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
primaryEngine.flush();
MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class);
leaf.setRandomIOExceptionRate(1.0);
@@ -860,7 +865,7 @@ public class ShadowEngineTests extends ESTestCase {
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
fail("exception expected");
- } catch (EngineClosedException ex) {
+ } catch (AlreadyClosedException ex) {
// all is well
}
}
@@ -879,8 +884,8 @@ public class ShadowEngineTests extends ESTestCase {
*/
public void testFailStart() throws IOException {
// Need a commit point for this
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, testDocumentWithTextField(), B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
- primaryEngine.index(new Engine.Index(newUid("1"), doc));
+ primaryEngine.index(indexForDoc(doc));
primaryEngine.flush();
// this test fails if any reader, searcher or directory is not closed - MDW FTW
@@ -965,8 +970,8 @@ public class ShadowEngineTests extends ESTestCase {
// create a document
ParseContext.Document document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, document, B_1, null);
+ ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null);
- pEngine.index(new Engine.Index(newUid("1"), doc));
+ pEngine.index(indexForDoc(doc));
pEngine.flush(true, true);
t.join();
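The recurring substitution in these hunks replaces hand-built Engine.Index operations and string uids with doc-based test helpers. A minimal sketch of what those helpers look like; newUid(ParsedDocument) appears verbatim in the TranslogTests hunk further down, while the body of indexForDoc is an assumption inferred from the call sites, not shown in this diff:

// Sketch only: newUid(ParsedDocument) mirrors the overload added in TranslogTests;
// indexForDoc is assumed to be a convenience wrapper in the shared engine test base class.
private Term newUid(ParsedDocument doc) {
    return new Term("_uid", doc.uid());
}

private Engine.Index indexForDoc(ParsedDocument doc) {
    return new Engine.Index(newUid(doc), doc);
}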
View File
@@ -214,7 +214,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
assertEquals("b", fields[1].stringValue());
IndexShard shard = indexService.getShard(0);
- shard.index(new Engine.Index(new Term("_uid", "1"), doc));
+ shard.index(new Engine.Index(new Term("_uid", doc.uid()), doc));
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
@@ -253,7 +253,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
assertEquals("b", fields[1].stringValue());
IndexShard shard = indexService.getShard(0);
- shard.index(new Engine.Index(new Term("_uid", "1"), doc));
+ shard.index(new Engine.Index(new Term("_uid", doc.uid()), doc));
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
View File
@@ -56,6 +56,7 @@ import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.translog.Translog;
@@ -100,9 +101,9 @@ public class IndexShardIT extends ESSingleNodeTestCase {
return pluginList(InternalSettingsPlugin.class);
}
- private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long seqNo,
+ private ParsedDocument testParsedDocument(String id, String type, String routing, long seqNo,
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
- Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 0);
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
document.add(uidField);
@@ -325,14 +326,13 @@ public class IndexShardIT extends ESSingleNodeTestCase {
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldFlush());
ParsedDocument doc = testParsedDocument(
- "1",
"1",
"test",
null,
SequenceNumbersService.UNASSIGNED_SEQ_NO,
new ParseContext.Document(),
new BytesArray(new byte[]{1}), null);
- Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
+ Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
shard.index(index);
assertTrue(shard.shouldFlush());
assertEquals(2, shard.getEngine().getTranslog().totalOperations());
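The testParsedDocument signature change above drops the separate uid parameter; the _uid field value is now derived from the type and id. Roughly, assuming Uid.createUid joins the two with the usual '#' delimiter:

// Illustration: the uid string is computed rather than passed in.
// Uid.createUid("test", "1") yields "test#1" (assuming the standard '#' delimiter).
Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE);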
View File
@@ -29,6 +29,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
+ import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Constants;
import org.elasticsearch.Version;
@@ -547,9 +548,9 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(shard);
}
- private ParsedDocument testParsedDocument(String uid, String id, String type, String routing,
+ private ParsedDocument testParsedDocument(String id, String type, String routing,
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
- Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 0);
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
document.add(uidField);
@@ -619,9 +620,9 @@ public class IndexShardTests extends IndexShardTestCase {
});
recoveryShardFromStore(shard);
- ParsedDocument doc = testParsedDocument("1", "1", "test", null, new ParseContext.Document(),
+ ParsedDocument doc = testParsedDocument("1", "test", null, new ParseContext.Document(),
new BytesArray(new byte[]{1}), null);
- Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
+ Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
shard.index(index);
assertEquals(1, preIndex.get());
assertEquals(1, postIndexCreate.get());
@@ -640,7 +641,7 @@ public class IndexShardTests extends IndexShardTestCase {
assertEquals(0, postDelete.get());
assertEquals(0, postDeleteException.get());
- Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1"));
+ Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid()));
shard.delete(delete);
assertEquals(2, preIndex.get());
@@ -657,7 +658,7 @@ public class IndexShardTests extends IndexShardTestCase {
try {
shard.index(index);
fail();
- } catch (IllegalIndexShardStateException e) {
+ } catch (AlreadyClosedException e) {
}
@@ -671,7 +672,7 @@ public class IndexShardTests extends IndexShardTestCase {
try {
shard.delete(delete);
fail();
- } catch (IllegalIndexShardStateException e) {
+ } catch (AlreadyClosedException e) {
}
@@ -1376,10 +1377,10 @@ public class IndexShardTests extends IndexShardTestCase {
for (int i = 0; i < numDocs; i++) {
final String id = Integer.toString(i);
final ParsedDocument doc =
- testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
+ testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
final Engine.Index index =
new Engine.Index(
- new Term("_uid", id),
+ new Term("_uid", doc.uid()),
doc,
SequenceNumbersService.UNASSIGNED_SEQ_NO,
0,
@@ -1406,10 +1407,10 @@ public class IndexShardTests extends IndexShardTestCase {
for (final Integer i : ids) {
final String id = Integer.toString(i);
final ParsedDocument doc =
- testParsedDocument(id, id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
+ testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
final Engine.Index index =
new Engine.Index(
- new Term("_uid", id),
+ new Term("_uid", doc.uid()),
doc,
SequenceNumbersService.UNASSIGNED_SEQ_NO,
0,
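Both catch-block changes above reflect the same behavioral shift: operations on a closed shard or engine now surface Lucene's AlreadyClosedException rather than IllegalIndexShardStateException (or EngineClosedException in the shadow engine test earlier), so the expected-failure pattern becomes:

// Expected-failure pattern after the change: catch the Lucene exception.
try {
    shard.index(index);
    fail();
} catch (AlreadyClosedException e) {
    // expected: the underlying engine was already closed
}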
View File
@@ -21,6 +21,8 @@ package org.elasticsearch.index.shard;
import org.apache.lucene.index.Term;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.Engine;
+ import org.elasticsearch.index.engine.InternalEngineTests;
+ import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.test.ESTestCase;
@@ -131,9 +133,10 @@ public class IndexingOperationListenerTests extends ESTestCase{
}
Collections.shuffle(indexingOperationListeners, random());
IndexingOperationListener.CompositeListener compositeListener =
new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger);
- Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", "1"));
- Engine.Index index = new Engine.Index(new Term("_uid", "1"), null);
+ ParsedDocument doc = InternalEngineTests.createParsedDoc("1", "test", null);
+ Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", doc.uid()));
+ Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbersService.UNASSIGNED_SEQ_NO, true));
assertEquals(0, preIndex.get());
assertEquals(0, postIndex.get());
View File
@@ -48,6 +48,7 @@ import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.Store;
@@ -297,7 +298,7 @@ public class RefreshListenersTests extends ESTestCase {
}
listener.assertNoError();
- Engine.Get get = new Engine.Get(false, new Term("_uid", "test:"+threadId));
+ Engine.Get get = new Engine.Get(false, new Term("_uid", Uid.createUid("test", threadId)));
try (Engine.GetResult getResult = engine.get(get)) {
assertTrue("document not found", getResult.exists());
assertEquals(iteration, getResult.version());
@@ -328,7 +329,7 @@ public class RefreshListenersTests extends ESTestCase {
String uid = type + ":" + id;
Document document = new Document();
document.add(new TextField("test", testFieldValue, Field.Store.YES));
- Field uidField = new Field("_uid", type + ":" + id, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
document.add(uidField);
@@ -338,7 +339,7 @@ public class RefreshListenersTests extends ESTestCase {
document.add(seqID.primaryTerm);
BytesReference source = new BytesArray(new byte[] { 1 });
ParsedDocument doc = new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, null);
- Engine.Index index = new Engine.Index(new Term("_uid", uid), doc);
+ Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc);
return engine.index(index);
}
View File
@@ -56,6 +56,7 @@ import org.elasticsearch.index.engine.Engine.Operation.Origin;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
@@ -625,8 +626,12 @@ public class TranslogTests extends ESTestCase {
}
}
- private Term newUid(String id) {
- return new Term("_uid", id);
+ private Term newUid(ParsedDocument doc) {
+ return new Term("_uid", doc.uid());
+ }
+ private Term newUid(String uid) {
+ return new Term("_uid", uid);
}
public void testVerifyTranslogIsNotDeleted() throws IOException {
@@ -2014,7 +2019,7 @@ public class TranslogTests extends ESTestCase {
seqID.seqNo.setLongValue(randomSeqNum);
seqID.seqNoDocValue.setLongValue(randomSeqNum);
seqID.primaryTerm.setLongValue(randomPrimaryTerm);
- Field uidField = new Field("_uid", "1", UidFieldMapper.Defaults.FIELD_TYPE);
+ Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 1);
Document document = new Document();
document.add(new TextField("value", "test", Field.Store.YES));
@@ -2025,7 +2030,7 @@ public class TranslogTests extends ESTestCase {
document.add(seqID.primaryTerm);
ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, null);
- Engine.Index eIndex = new Engine.Index(newUid("1"), doc, randomSeqNum, randomPrimaryTerm,
+ Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
Translog.Index index = new Translog.Index(eIndex, eIndexResult);
@@ -2036,7 +2041,7 @@ public class TranslogTests extends ESTestCase {
Translog.Index serializedIndex = new Translog.Index(in);
assertEquals(index, serializedIndex);
- Engine.Delete eDelete = new Engine.Delete("type", "1", newUid("1"), randomSeqNum, randomPrimaryTerm,
+ Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
2, VersionType.INTERNAL, Origin.PRIMARY, 0);
Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
View File
@@ -22,7 +22,7 @@ package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.common.bytes.BytesReference;
- import org.elasticsearch.node.service.NodeService;
+ import org.elasticsearch.node.NodeService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
View File
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
+ import org.elasticsearch.node.InternalSettingsPreparer;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.junit.Before;
View File
@@ -29,11 +29,26 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.UnaryOperator;
import org.elasticsearch.client.node.NodeClient;
+ import org.elasticsearch.common.breaker.CircuitBreaker;
+ import org.elasticsearch.common.bytes.BytesArray;
+ import org.elasticsearch.common.bytes.BytesReference;
+ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.logging.DeprecationLogger;
+ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
+ import org.elasticsearch.common.transport.BoundTransportAddress;
+ import org.elasticsearch.common.transport.TransportAddress;
+ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
+ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+ import org.elasticsearch.http.HttpInfo;
+ import org.elasticsearch.http.HttpServerTransport;
+ import org.elasticsearch.http.HttpStats;
+ import org.elasticsearch.indices.breaker.CircuitBreakerService;
+ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
+ import org.junit.Before;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
@@ -43,10 +58,39 @@ import static org.mockito.Mockito.verify;
public class RestControllerTests extends ESTestCase {
+ private static final ByteSizeValue BREAKER_LIMIT = new ByteSizeValue(20);
+ private CircuitBreaker inFlightRequestsBreaker;
+ private RestController restController;
+ private HierarchyCircuitBreakerService circuitBreakerService;
+ @Before
+ public void setup() {
+ Settings settings = Settings.EMPTY;
+ circuitBreakerService = new HierarchyCircuitBreakerService(
+ Settings.builder()
+ .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), BREAKER_LIMIT)
+ .build(),
+ new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
+ // we can do this here only because we know that we don't adjust breaker settings dynamically in the test
+ inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
+ HttpServerTransport httpServerTransport = new TestHttpServerTransport();
+ restController = new RestController(settings, Collections.emptySet(), null, null, circuitBreakerService);
+ restController.registerHandler(RestRequest.Method.GET, "/",
+ (request, channel, client) -> channel.sendResponse(
+ new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)));
+ restController.registerHandler(RestRequest.Method.GET, "/error", (request, channel, client) -> {
+ throw new IllegalArgumentException("test error");
+ });
+ httpServerTransport.start();
+ }
public void testApplyRelevantHeaders() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
Set<String> headers = new HashSet<>(Arrays.asList("header.1", "header.2"));
- final RestController restController = new RestController(Settings.EMPTY, headers, null);
+ final RestController restController = new RestController(Settings.EMPTY, headers, null, null, circuitBreakerService);
restController.registerHandler(RestRequest.Method.GET, "/",
(RestRequest request, RestChannel channel, NodeClient client) -> {
assertEquals("true", threadContext.getHeader("header.1"));
@@ -66,7 +110,7 @@ public class RestControllerTests extends ESTestCase {
}
public void testCanTripCircuitBreaker() throws Exception {
- RestController controller = new RestController(Settings.EMPTY, Collections.emptySet(), null);
+ RestController controller = new RestController(Settings.EMPTY, Collections.emptySet(), null, null, circuitBreakerService);
// trip circuit breaker by default
controller.registerHandler(RestRequest.Method.GET, "/trip", new FakeRestHandler(true));
controller.registerHandler(RestRequest.Method.GET, "/do-not-trip", new FakeRestHandler(false));
@@ -126,7 +170,8 @@ public class RestControllerTests extends ESTestCase {
assertSame(handler, h);
return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true);
};
- final RestController restController = new RestController(Settings.EMPTY, Collections.emptySet(), wrapper);
+ final RestController restController = new RestController(Settings.EMPTY, Collections.emptySet(), wrapper, null,
+ circuitBreakerService);
restController.registerHandler(RestRequest.Method.GET, "/", handler);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
restController.dispatchRequest(new FakeRestRequest.Builder(xContentRegistry()).build(), null, null, threadContext);
@@ -154,4 +199,156 @@
return canTripCircuitBreaker;
}
}
public void testDispatchRequestAddsAndFreesBytesOnSuccess() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK);
restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestAddsAndFreesBytesOnError() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/error", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST);
restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() {
int contentLength = BREAKER_LIMIT.bytesAsInt();
String content = randomAsciiOfLength(contentLength);
// we will produce an error in the rest handler and one more when sending the error response
TestRestRequest request = new TestRestRequest("/error", content);
ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true);
restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(0, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
public void testDispatchRequestLimitsBytes() {
int contentLength = BREAKER_LIMIT.bytesAsInt() + 1;
String content = randomAsciiOfLength(contentLength);
TestRestRequest request = new TestRestRequest("/", content);
AssertingChannel channel = new AssertingChannel(request, true, RestStatus.SERVICE_UNAVAILABLE);
restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY));
assertEquals(1, inFlightRequestsBreaker.getTrippedCount());
assertEquals(0, inFlightRequestsBreaker.getUsed());
}
private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements
HttpServerTransport {
public TestHttpServerTransport() {
super(Settings.EMPTY);
}
@Override
protected void doStart() {
}
@Override
protected void doStop() {
}
@Override
protected void doClose() {
}
@Override
public BoundTransportAddress boundAddress() {
TransportAddress transportAddress = buildNewFakeTransportAddress();
return new BoundTransportAddress(new TransportAddress[] {transportAddress} ,transportAddress);
}
@Override
public HttpInfo info() {
return null;
}
@Override
public HttpStats stats() {
return null;
}
}
private static final class AssertingChannel extends AbstractRestChannel {
private final RestStatus expectedStatus;
protected AssertingChannel(RestRequest request, boolean detailedErrorsEnabled, RestStatus expectedStatus) {
super(request, detailedErrorsEnabled);
this.expectedStatus = expectedStatus;
}
@Override
public void sendResponse(RestResponse response) {
assertEquals(expectedStatus, response.status());
}
}
private static final class ExceptionThrowingChannel extends AbstractRestChannel {
protected ExceptionThrowingChannel(RestRequest request, boolean detailedErrorsEnabled) {
super(request, detailedErrorsEnabled);
}
@Override
public void sendResponse(RestResponse response) {
throw new IllegalStateException("always throwing an exception for testing");
}
}
private static final class TestRestRequest extends RestRequest {
private final BytesReference content;
private TestRestRequest(String path, String content) {
super(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path);
this.content = new BytesArray(content);
}
@Override
public Method method() {
return Method.GET;
}
@Override
public String uri() {
return null;
}
@Override
public boolean hasContent() {
return true;
}
@Override
public BytesReference content() {
return content;
}
@Override
public String header(String name) {
return null;
}
@Override
public Iterable<Map.Entry<String, String>> headers() {
return null;
}
}
}
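The extra RestController constructor arguments thread a CircuitBreakerService into request dispatch: the in-flight-requests breaker is what the new testDispatchRequest* tests exercise, accounting for request content bytes and answering over-limit requests with SERVICE_UNAVAILABLE. A condensed sketch of the wiring from setup() above (the 20-byte limit and the null handler-wrapper/transport arguments are test-only choices):

// Condensed from setup(): cap in-flight request bytes at a small limit and
// hand the breaker service to the RestController so dispatchRequest can
// add and free request content bytes against it.
HierarchyCircuitBreakerService circuitBreakerService = new HierarchyCircuitBreakerService(
    Settings.builder()
        .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
            new ByteSizeValue(20))
        .build(),
    new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
RestController restController =
    new RestController(Settings.EMPTY, Collections.emptySet(), null, null, circuitBreakerService);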
View File
@@ -21,6 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
+ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.ESTestCase;
@@ -43,7 +44,7 @@ public class RestNodesStatsActionTests extends ESTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
- action = new RestNodesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet(), null));
+ action = new RestNodesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet(), null, null, null));
}
public void testUnrecognizedMetric() throws IOException {
View File
@@ -41,7 +41,7 @@ public class RestIndicesStatsActionTests extends ESTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
- action = new RestIndicesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet(), null));
+ action = new RestIndicesStatsAction(Settings.EMPTY, new RestController(Settings.EMPTY, Collections.emptySet(), null, null, null));
}
public void testUnrecognizedMetric() throws IOException {
View File
@@ -74,7 +74,7 @@ public class RestIndicesActionTests extends ESTestCase {
public void testBuildTable() {
final Settings settings = Settings.EMPTY;
- final RestController restController = new RestController(settings, Collections.emptySet(), null);
+ final RestController restController = new RestController(settings, Collections.emptySet(), null, null, null);
final RestIndicesAction action = new RestIndicesAction(settings, restController, new IndexNameExpressionResolver(settings));
// build a (semi-)random table
View File
@@ -50,7 +50,7 @@ public class RestRecoveryActionTests extends ESTestCase {
public void testRestRecoveryAction() {
final Settings settings = Settings.EMPTY;
- final RestController restController = new RestController(settings, Collections.emptySet(), null);
+ final RestController restController = new RestController(settings, Collections.emptySet(), null, null, null);
final RestRecoveryAction action = new RestRecoveryAction(settings, restController, restController);
final int totalShards = randomIntBetween(1, 32);
final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2));
View File
@@ -23,6 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+ import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
@@ -161,7 +162,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
}
public QueryBuilder aliasFilter(IndexMetaData indexMetaData, String... aliasNames) {
- ShardSearchRequest.FilterParser filterParser = bytes -> {
+ CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry(), bytes)) {
return new QueryParseContext(parser).parseInnerQueryBuilder();
}
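The change above swaps the dedicated ShardSearchRequest.FilterParser interface for the generic CheckedFunction: a function from the alias filter's byte[] source to a QueryBuilder that may throw IOException while parsing. The lambda body is unchanged:

// Same parsing logic, now typed as a generic checked function.
CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
    try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry(), bytes)) {
        return new QueryParseContext(parser).parseInnerQueryBuilder();
    }
};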
View File
@@ -0,0 +1,121 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ProfileResultTests extends ESTestCase {
public void testToXContent() throws IOException {
List<ProfileResult> children = new ArrayList<>();
children.add(new ProfileResult("child1", "desc1", Collections.emptyMap(), Collections.emptyList(), 100L));
children.add(new ProfileResult("child2", "desc2", Collections.emptyMap(), Collections.emptyList(), 123356L));
Map<String, Long> timings = new HashMap<>();
timings.put("key1", 12345L);
timings.put("key2", 6789L);
ProfileResult result = new ProfileResult("someType", "some description", timings, children, 123456L);
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"type\" : \"someType\",\n" +
" \"description\" : \"some description\",\n" +
" \"time_in_nanos\" : 123456,\n" +
" \"breakdown\" : {\n" +
" \"key1\" : 12345,\n" +
" \"key2\" : 6789\n" +
" },\n" +
" \"children\" : [\n" +
" {\n" +
" \"type\" : \"child1\",\n" +
" \"description\" : \"desc1\",\n" +
" \"time_in_nanos\" : 100,\n" +
" \"breakdown\" : { }\n" +
" },\n" +
" {\n" +
" \"type\" : \"child2\",\n" +
" \"description\" : \"desc2\",\n" +
" \"time_in_nanos\" : 123356,\n" +
" \"breakdown\" : { }\n" +
" }\n" +
" ]\n" +
"}", builder.string());
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"type\" : \"someType\",\n" +
" \"description\" : \"some description\",\n" +
" \"time\" : \"123.4micros\",\n" +
" \"time_in_nanos\" : 123456,\n" +
" \"breakdown\" : {\n" +
" \"key1\" : 12345,\n" +
" \"key2\" : 6789\n" +
" },\n" +
" \"children\" : [\n" +
" {\n" +
" \"type\" : \"child1\",\n" +
" \"description\" : \"desc1\",\n" +
" \"time\" : \"100nanos\",\n" +
" \"time_in_nanos\" : 100,\n" +
" \"breakdown\" : { }\n" +
" },\n" +
" {\n" +
" \"type\" : \"child2\",\n" +
" \"description\" : \"desc2\",\n" +
" \"time\" : \"123.3micros\",\n" +
" \"time_in_nanos\" : 123356,\n" +
" \"breakdown\" : { }\n" +
" }\n" +
" ]\n" +
"}", builder.string());
result = new ProfileResult("profileName", "some description", Collections.emptyMap(), Collections.emptyList(), 12345678L);
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"type\" : \"profileName\",\n" +
" \"description\" : \"some description\",\n" +
" \"time\" : \"12.3ms\",\n" +
" \"time_in_nanos\" : 12345678,\n" +
" \"breakdown\" : { }\n" +
"}", builder.string());
result = new ProfileResult("profileName", "some description", Collections.emptyMap(), Collections.emptyList(), 1234567890L);
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"type\" : \"profileName\",\n" +
" \"description\" : \"some description\",\n" +
" \"time\" : \"1.2s\",\n" +
" \"time_in_nanos\" : 1234567890,\n" +
" \"breakdown\" : { }\n" +
"}", builder.string());
}
}

View File

@ -0,0 +1,102 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile.query;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class CollectorResultTests extends ESTestCase {
public void testToXContent() throws IOException {
List<CollectorResult> children = new ArrayList<>();
children.add(new CollectorResult("child1", "reason1", 100L, Collections.emptyList()));
children.add(new CollectorResult("child2", "reason1", 123356L, Collections.emptyList()));
CollectorResult result = new CollectorResult("collectorName", "some reason", 123456L, children);
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"name\" : \"collectorName\",\n" +
" \"reason\" : \"some reason\",\n" +
" \"time_in_nanos\" : 123456,\n" +
" \"children\" : [\n" +
" {\n" +
" \"name\" : \"child1\",\n" +
" \"reason\" : \"reason1\",\n" +
" \"time_in_nanos\" : 100\n" +
" },\n" +
" {\n" +
" \"name\" : \"child2\",\n" +
" \"reason\" : \"reason1\",\n" +
" \"time_in_nanos\" : 123356\n" +
" }\n" +
" ]\n" +
"}", builder.string());
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"name\" : \"collectorName\",\n" +
" \"reason\" : \"some reason\",\n" +
" \"time\" : \"123.4micros\",\n" +
" \"time_in_nanos\" : 123456,\n" +
" \"children\" : [\n" +
" {\n" +
" \"name\" : \"child1\",\n" +
" \"reason\" : \"reason1\",\n" +
" \"time\" : \"100nanos\",\n" +
" \"time_in_nanos\" : 100\n" +
" },\n" +
" {\n" +
" \"name\" : \"child2\",\n" +
" \"reason\" : \"reason1\",\n" +
" \"time\" : \"123.3micros\",\n" +
" \"time_in_nanos\" : 123356\n" +
" }\n" +
" ]\n" +
"}", builder.string());
result = new CollectorResult("collectorName", "some reason", 12345678L, Collections.emptyList());
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"name\" : \"collectorName\",\n" +
" \"reason\" : \"some reason\",\n" +
" \"time\" : \"12.3ms\",\n" +
" \"time_in_nanos\" : 12345678\n" +
"}", builder.string());
result = new CollectorResult("collectorName", "some reason", 1234567890L, Collections.emptyList());
builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true);
result.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" +
" \"name\" : \"collectorName\",\n" +
" \"reason\" : \"some reason\",\n" +
" \"time\" : \"1.2s\",\n" +
" \"time_in_nanos\" : 1234567890\n" +
"}", builder.string());
}
}

View File

@ -29,10 +29,10 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor; import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer; import java.util.function.BiConsumer;
import java.util.function.Function;
import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasToString;
public class ScalingThreadPoolTests extends ESThreadPoolTestCase { public class ScalingThreadPoolTests extends ESThreadPoolTestCase {
@ -95,13 +95,8 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase {
}); });
} }
@FunctionalInterface
private interface SizeFunction {
int size(int numberOfProcessors);
}
private int expectedSize(final String threadPoolName, final int numberOfProcessors) { private int expectedSize(final String threadPoolName, final int numberOfProcessors) {
final Map<String, SizeFunction> sizes = new HashMap<>(); final Map<String, Function<Integer, Integer>> sizes = new HashMap<>();
sizes.put(ThreadPool.Names.GENERIC, n -> ThreadPool.boundedBy(4 * n, 128, 512)); sizes.put(ThreadPool.Names.GENERIC, n -> ThreadPool.boundedBy(4 * n, 128, 512));
sizes.put(ThreadPool.Names.MANAGEMENT, n -> 5); sizes.put(ThreadPool.Names.MANAGEMENT, n -> 5);
sizes.put(ThreadPool.Names.FLUSH, ThreadPool::halfNumberOfProcessorsMaxFive); sizes.put(ThreadPool.Names.FLUSH, ThreadPool::halfNumberOfProcessorsMaxFive);
@ -110,7 +105,7 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase {
sizes.put(ThreadPool.Names.SNAPSHOT, ThreadPool::halfNumberOfProcessorsMaxFive); sizes.put(ThreadPool.Names.SNAPSHOT, ThreadPool::halfNumberOfProcessorsMaxFive);
sizes.put(ThreadPool.Names.FETCH_SHARD_STARTED, ThreadPool::twiceNumberOfProcessors); sizes.put(ThreadPool.Names.FETCH_SHARD_STARTED, ThreadPool::twiceNumberOfProcessors);
sizes.put(ThreadPool.Names.FETCH_SHARD_STORE, ThreadPool::twiceNumberOfProcessors); sizes.put(ThreadPool.Names.FETCH_SHARD_STORE, ThreadPool::twiceNumberOfProcessors);
return sizes.get(threadPoolName).size(numberOfProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors);
} }
public void testScalingThreadPoolIsBounded() throws InterruptedException { public void testScalingThreadPoolIsBounded() throws InterruptedException {
@ -198,13 +193,4 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase {
terminateThreadPoolIfNeeded(threadPool); terminateThreadPoolIfNeeded(threadPool);
} }
} }
private static Settings settings(final String setting, final int value) {
return settings(setting, Integer.toString(value));
}
private static Settings settings(final String setting, final String value) {
return Settings.builder().put(setting, value).build();
}
} }

View File

@ -11,10 +11,10 @@ appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile appender.rolling.type = RollingFile
appender.rolling.name = rolling appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1 appender.rolling.policies.time.interval = 1
@ -26,10 +26,10 @@ rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB appender.deprecation_rolling.policies.size.size = 1GB
@ -43,10 +43,10 @@ logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1 appender.index_search_slowlog_rolling.policies.time.interval = 1
@ -59,10 +59,10 @@ logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1 appender.index_indexing_slowlog_rolling.policies.time.interval = 1

View File

@ -206,7 +206,14 @@ access to the returned response.
NOTE: A `ResponseException` is **not** thrown for `HEAD` requests that return NOTE: A `ResponseException` is **not** thrown for `HEAD` requests that return
a `404` status code because it is an expected `HEAD` response that simply a `404` status code because it is an expected `HEAD` response that simply
denotes that the resource is not found. All other HTTP methods (e.g., `GET`) denotes that the resource is not found. All other HTTP methods (e.g., `GET`)
throw a `ResponseException` for `404` responses. throw a `ResponseException` for `404` responses unless the `ignore` parameter
contains `404`. `ignore` is a special client parameter that is not sent to
Elasticsearch and contains a comma-separated list of error status codes.
It lets you control whether certain error status codes should be treated as
expected responses rather than as exceptions. This is useful, for instance,
with the get API, which can return `404` when the document is missing; in that
case the response body will not contain an error but rather the usual get API
response, just without the document, as it was not found.
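For example, a minimal sketch of fetching a document that may be missing, assuming a low-level `RestClient` instance named `restClient` and an illustrative `/twitter/tweet/1` endpoint:

[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.client.Response;

// "ignore" is consumed by the client and never sent to Elasticsearch; with
// 404 in the list, a missing document yields a regular Response instead of
// a thrown ResponseException.
Map<String, String> params = Collections.singletonMap("ignore", "404");
Response response = restClient.performRequest("GET", "/twitter/tweet/1", params);
if (response.getStatusLine().getStatusCode() == 404) {
    // handle the usual get API body, just without the document
}
--------------------------------------------------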
=== Example requests === Example requests

View File

@ -2,9 +2,7 @@
=== Truncate Token Filter === Truncate Token Filter
The `truncate` token filter can be used to truncate tokens into a The `truncate` token filter can be used to truncate tokens into a
specific length. This can come in handy with keyword (single token) specific length.
based mapped fields that are used for sorting in order to reduce memory
usage.
It accepts a `length` parameter which controls the number of characters It accepts a `length` parameter which controls the number of characters
to truncate to, and defaults to `10`. to truncate to, and defaults to `10`.
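As a minimal sketch (not from the original docs), the filter can be wired into a custom analyzer through index settings from Java; the names `my_truncate` and `my_analyzer` below are illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

// Index settings defining a "truncate" filter capped at 5 characters,
// applied after lowercasing in a custom analyzer.
Settings indexSettings = Settings.builder()
        .put("analysis.filter.my_truncate.type", "truncate")
        .put("analysis.filter.my_truncate.length", 5)
        .put("analysis.analyzer.my_analyzer.tokenizer", "standard")
        .putArray("analysis.analyzer.my_analyzer.filter", "lowercase", "my_truncate")
        .build();
--------------------------------------------------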

View File

@ -33,3 +33,9 @@
The search shards API no longer accepts the `type` url parameter, which didn't The search shards API no longer accepts the `type` url parameter, which didn't
have any effect in previous versions. have any effect in previous versions.
==== Changes to the Profile API
* The `"time"` field showing human readable timing output has been replaced by the `"time_in_nanos"`
field which displays the elapsed time in nanoseconds. The `"time"` field can be turned on by adding
`"?human=true"` to the request url. It will display a rounded, human readable time value.

View File

@ -20,3 +20,10 @@ recognized anymore.
The `default` `index.store.type` has been removed. If you were using it, we The `default` `index.store.type` has been removed. If you were using it, we
advise that you simply remove it from your index settings and Elasticsearch advise that you simply remove it from your index settings and Elasticsearch
will use the best `store` implementation for your operating system. will use the best `store` implementation for your operating system.
==== Network settings
The blocking TCP client, blocking TCP server, and blocking HTTP server have been removed.
As a consequence, the `network.tcp.blocking_server`, `network.tcp.blocking_client`,
`network.tcp.blocking`, `transport.tcp.blocking_client`, `transport.tcp.blocking_server`,
and `http.tcp.blocking_server` settings are not recognized anymore.

View File

@ -58,7 +58,7 @@ This will yield the following result:
{ {
"type": "BooleanQuery", "type": "BooleanQuery",
"description": "message:message message:number", "description": "message:message message:number",
"time": "1.873811000ms", "time_in_nanos": "1873811",
"breakdown": { "breakdown": {
"score": 51306, "score": 51306,
"score_count": 4, "score_count": 4,
@ -77,7 +77,7 @@ This will yield the following result:
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "message:message", "description": "message:message",
"time": "0.3919430000ms", "time_in_nanos": "391943",
"breakdown": { "breakdown": {
"score": 28776, "score": 28776,
"score_count": 4, "score_count": 4,
@ -96,7 +96,7 @@ This will yield the following result:
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "message:number", "description": "message:number",
"time": "0.2106820000ms", "time_in_nanos": "210682",
"breakdown": { "breakdown": {
"score": 4552, "score": 4552,
"score_count": 4, "score_count": 4,
@ -120,12 +120,12 @@ This will yield the following result:
{ {
"name": "CancellableCollector", "name": "CancellableCollector",
"reason": "search_cancelled", "reason": "search_cancelled",
"time": "0.3043110000ms", "time_in_nanos": "304311",
"children": [ "children": [
{ {
"name": "SimpleTopScoreDocCollector", "name": "SimpleTopScoreDocCollector",
"reason": "search_top_hits", "reason": "search_top_hits",
"time": "0.03227300000ms" "time_in_nanos": "32273"
} }
] ]
} }
@ -143,22 +143,22 @@ This will yield the following result:
// TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/] // TESTRESPONSE[s/"id": "\[2aE02wS1R8q_QFnYu6vDVQ\]\[twitter\]\[1\]"/"id": $body.profile.shards.0.id/]
// TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/] // TESTRESPONSE[s/"rewrite_time": 51443/"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time/]
// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/] // TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/] // TESTRESPONSE[s/"time_in_nanos": "1873811"/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.time_in_nanos/]
// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/] // TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/] // TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/] // TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.3919430000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.0.time/] // TESTRESPONSE[s/"time_in_nanos": "391943"/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.children.0.time_in_nanos/]
// TESTRESPONSE[s/"score": 28776/"score": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.score/] // TESTRESPONSE[s/"score": 28776/"score": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.score/]
// TESTRESPONSE[s/"build_scorer": 784451/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.build_scorer/] // TESTRESPONSE[s/"build_scorer": 784451/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 1669564/"create_weight": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.create_weight/] // TESTRESPONSE[s/"create_weight": 1669564/"create_weight": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 10111/"next_doc": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.next_doc/] // TESTRESPONSE[s/"next_doc": 10111/"next_doc": $body.profile.shards.0.searches.0.query.0.children.0.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.2106820000ms"/"time": $body.profile.shards.0.searches.0.query.0.children.1.time/] // TESTRESPONSE[s/"time_in_nanos": "210682"/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.children.1.time_in_nanos/]
// TESTRESPONSE[s/"score": 4552/"score": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.score/] // TESTRESPONSE[s/"score": 4552/"score": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.score/]
// TESTRESPONSE[s/"build_scorer": 42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/] // TESTRESPONSE[s/"build_scorer": 42602/"build_scorer": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/] // TESTRESPONSE[s/"create_weight": 89323/"create_weight": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/] // TESTRESPONSE[s/"next_doc": 2852/"next_doc": $body.profile.shards.0.searches.0.query.0.children.1.breakdown.next_doc/]
// TESTRESPONSE[s/"time": "0.3043110000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] // TESTRESPONSE[s/"time_in_nanos": "304311"/"time_in_nanos": $body.profile.shards.0.searches.0.collector.0.time_in_nanos/]
// TESTRESPONSE[s/"time": "0.03227300000ms"/"time": $body.profile.shards.0.searches.0.collector.0.children.0.time/] // TESTRESPONSE[s/"time_in_nanos": "32273"/"time_in_nanos": $body.profile.shards.0.searches.0.collector.0.children.0.time_in_nanos/]
// Sorry for this mess.... // Sorry for this mess....
<1> Search results are returned, but were omitted here for brevity <1> Search results are returned, but were omitted here for brevity
@ -216,6 +216,10 @@ a `query` array and a `collector` array. Alongside the `search` object is an `a
There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds). There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds).
NOTE: As with other statistics APIs, the Profile API supports human-readable outputs. This can be turned on by adding
`?human=true` to the query string. In this case, the output contains an additional `"time"` field with rounded,
human-readable timing information (e.g. `"time": "391.9ms"`, `"time": "123.3micros"`).
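As a minimal sketch, a profiled search with the `human` flag could be sent through the low-level REST client; the `restClient` instance and the `twitter` index are assumptions:

[source,java]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;

// ?human=true adds the rounded "time" fields next to the exact
// "time_in_nanos" values in the profile output.
HttpEntity body = new NStringEntity(
        "{ \"profile\": true, \"query\": { \"match\": { \"message\": \"number\" } } }",
        ContentType.APPLICATION_JSON);
Response response = restClient.performRequest(
        "GET", "/twitter/_search", Collections.singletonMap("human", "true"), body);
--------------------------------------------------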
=== Profiling Queries === Profiling Queries
[NOTE] [NOTE]
@ -244,19 +248,19 @@ The overall structure of this query tree will resemble your original Elasticsear
{ {
"type": "BooleanQuery", "type": "BooleanQuery",
"description": "message:message message:number", "description": "message:message message:number",
"time": "1.873811000ms", "time_in_nanos": "1873811",
"breakdown": {...}, <1> "breakdown": {...}, <1>
"children": [ "children": [
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "message:message", "description": "message:message",
"time": "0.3919430000ms", "time_in_nanos": "391943",
"breakdown": {...} "breakdown": {...}
}, },
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "message:number", "description": "message:number",
"time": "0.2106820000ms", "time_in_nanos": "210682",
"breakdown": {...} "breakdown": {...}
} }
] ]
@ -265,9 +269,9 @@ The overall structure of this query tree will resemble your original Elasticsear
-------------------------------------------------- --------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n/] // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n/]
// TESTRESPONSE[s/]$/],"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/] // TESTRESPONSE[s/]$/],"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"time": "1.873811000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.breakdown/] // TESTRESPONSE[s/"time_in_nanos": "1873811",\n.+"breakdown": \{...\}/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.time_in_nanos, "breakdown": $body.profile.shards.0.searches.0.query.0.breakdown/]
// TESTRESPONSE[s/"time": "0.3919430000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.0.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.0.breakdown/] // TESTRESPONSE[s/"time_in_nanos": "391943",\n.+"breakdown": \{...\}/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.children.0.time_in_nanos, "breakdown": $body.profile.shards.0.searches.0.query.0.children.0.breakdown/]
// TESTRESPONSE[s/"time": "0.2106820000ms",\n.+"breakdown": \{...\}/"time": $body.profile.shards.0.searches.0.query.0.children.1.time, "breakdown": $body.profile.shards.0.searches.0.query.0.children.1.breakdown/] // TESTRESPONSE[s/"time_in_nanos": "210682",\n.+"breakdown": \{...\}/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.children.1.time_in_nanos, "breakdown": $body.profile.shards.0.searches.0.query.0.children.1.breakdown/]
<1> The breakdown timings are omitted for simplicity <1> The breakdown timings are omitted for simplicity
Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two
@ -276,7 +280,7 @@ the equivalent name in Elasticsearch. The `"description"` field displays the Lu
is made available to help differentiate between parts of your query (e.g. both `"message:search"` and `"message:test"` is made available to help differentiate between parts of your query (e.g. both `"message:search"` and `"message:test"`
are TermQuerys and would appear identical otherwise). are TermQuerys and would appear identical otherwise).
The `"time"` field shows that this query took ~15ms for the entire BooleanQuery to execute. The recorded time is inclusive The `"time_in_nanos"` field shows that this query took ~1.8ms for the entire BooleanQuery to execute. The recorded time is inclusive
of all children. of all children.
The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at
@ -305,16 +309,16 @@ The `"breakdown"` component lists detailed timing statistics about low-level Luc
"advance_count": 0 "advance_count": 0
} }
-------------------------------------------------- --------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time": $body.profile.shards.0.searches.0.query.0.time,/] // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time_in_nanos": $body.profile.shards.0.searches.0.query.0.time_in_nanos,/]
// TESTRESPONSE[s/}$/},\n"children": $body.profile.shards.0.searches.0.query.0.children}],\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/] // TESTRESPONSE[s/}$/},\n"children": $body.profile.shards.0.searches.0.query.0.children}],\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time, "collector": $body.profile.shards.0.searches.0.collector}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/] // TESTRESPONSE[s/"score": 51306/"score": $body.profile.shards.0.searches.0.query.0.breakdown.score/]
// TESTRESPONSE[s/"time": "1.873811000ms"/"time": $body.profile.shards.0.searches.0.query.0.time/] // TESTRESPONSE[s/"time_in_nanos": "1873811"/"time_in_nanos": $body.profile.shards.0.searches.0.query.0.time_in_nanos/]
// TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/] // TESTRESPONSE[s/"build_scorer": 2935582/"build_scorer": $body.profile.shards.0.searches.0.query.0.breakdown.build_scorer/]
// TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/] // TESTRESPONSE[s/"create_weight": 919297/"create_weight": $body.profile.shards.0.searches.0.query.0.breakdown.create_weight/]
// TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/] // TESTRESPONSE[s/"next_doc": 53876/"next_doc": $body.profile.shards.0.searches.0.query.0.breakdown.next_doc/]
Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall
`"time"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is `"time_in_nanos"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is
actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time, actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time,
the breakdown is inclusive of all children times. the breakdown is inclusive of all children times.
@ -401,12 +405,12 @@ Looking at the previous example:
{ {
"name": "CancellableCollector", "name": "CancellableCollector",
"reason": "search_cancelled", "reason": "search_cancelled",
"time": "0.3043110000ms", "time_in_nanos": "304311",
"children": [ "children": [
{ {
"name": "SimpleTopScoreDocCollector", "name": "SimpleTopScoreDocCollector",
"reason": "search_top_hits", "reason": "search_top_hits",
"time": "0.03227300000ms" "time_in_nanos": "32273"
} }
] ]
} }
@ -414,12 +418,12 @@ Looking at the previous example:
-------------------------------------------------- --------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/] // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.profile.shards.0.id",\n"searches": [{\n"query": $body.profile.shards.0.searches.0.query,\n"rewrite_time": $body.profile.shards.0.searches.0.rewrite_time,/]
// TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/] // TESTRESPONSE[s/]$/]}], "aggregations": []}]}}/]
// TESTRESPONSE[s/"time": "0.3043110000ms"/"time": $body.profile.shards.0.searches.0.collector.0.time/] // TESTRESPONSE[s/"time_in_nanos": "304311"/"time_in_nanos": $body.profile.shards.0.searches.0.collector.0.time_in_nanos/]
// TESTRESPONSE[s/"time": "0.03227300000ms"/"time": $body.profile.shards.0.searches.0.collector.0.children.0.time/] // TESTRESPONSE[s/"time_in_nanos": "32273"/"time_in_nanos": $body.profile.shards.0.searches.0.collector.0.children.0.time_in_nanos/]
We see a single collector named `SimpleTopScoreDocCollector` wrapped into `CancellableCollector`. `SimpleTopScoreDocCollector` is the default "scoring and sorting" We see a single collector named `SimpleTopScoreDocCollector` wrapped into `CancellableCollector`. `SimpleTopScoreDocCollector` is the default "scoring and sorting"
`Collector` used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The `Collector` used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The
`"time"` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists `"time_in_nanos"` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists
all sub-collectors. The `CancellableCollector` that wraps `SimpleTopScoreDocCollector` is used by Elasticsearch to detect if the current all sub-collectors. The `CancellableCollector` that wraps `SimpleTopScoreDocCollector` is used by Elasticsearch to detect if the current
search was cancelled and to stop collecting documents as soon as that occurs. search was cancelled and to stop collecting documents as soon as that occurs.
@ -564,7 +568,7 @@ And the response:
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "my_field:foo", "description": "my_field:foo",
"time": "0.4094560000ms", "time_in_nanos": "409456",
"breakdown": { "breakdown": {
"score": 0, "score": 0,
"score_count": 1, "score_count": 1,
@ -583,7 +587,7 @@ And the response:
{ {
"type": "TermQuery", "type": "TermQuery",
"description": "message:search", "description": "message:search",
"time": "0.3037020000ms", "time_in_nanos": "303702",
"breakdown": { "breakdown": {
"score": 0, "score": 0,
"score_count": 1, "score_count": 1,
@ -605,24 +609,24 @@ And the response:
{ {
"name": "MultiCollector", "name": "MultiCollector",
"reason": "search_multi", "reason": "search_multi",
"time": "1.378943000ms", "time_in_nanos": "1378943",
"children": [ "children": [
{ {
"name": "FilteredCollector", "name": "FilteredCollector",
"reason": "search_post_filter", "reason": "search_post_filter",
"time": "0.4036590000ms", "time_in_nanos": "403659",
"children": [ "children": [
{ {
"name": "SimpleTopScoreDocCollector", "name": "SimpleTopScoreDocCollector",
"reason": "search_top_hits", "reason": "search_top_hits",
"time": "0.006391000000ms" "time_in_nanos": "6391"
} }
] ]
}, },
{ {
"name": "BucketCollector: [[non_global_term, another_agg]]", "name": "BucketCollector: [[non_global_term, another_agg]]",
"reason": "aggregation", "reason": "aggregation",
"time": "0.9546020000ms" "time_in_nanos": "954602"
} }
] ]
} }
@ -633,7 +637,7 @@ And the response:
{ {
"type": "MatchAllDocsQuery", "type": "MatchAllDocsQuery",
"description": "*:*", "description": "*:*",
"time": "0.04829300000ms", "time_in_nanos": "48293",
"breakdown": { "breakdown": {
"score": 0, "score": 0,
"score_count": 1, "score_count": 1,
@ -655,7 +659,7 @@ And the response:
{ {
"name": "GlobalAggregator: [global_agg]", "name": "GlobalAggregator: [global_agg]",
"reason": "aggregation_global", "reason": "aggregation_global",
"time": "0.1226310000ms" "time_in_nanos": "122631"
} }
] ]
} }
@ -738,7 +742,7 @@ Which yields the following aggregation profile output
{ {
"type": "org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator", "type": "org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator",
"description": "property_type", "description": "property_type",
"time": "4280.456978ms", "time_in_nanos": "4280456978",
"breakdown": { "breakdown": {
"reduce": 0, "reduce": 0,
"reduce_count": 0, "reduce_count": 0,
@ -753,7 +757,7 @@ Which yields the following aggregation profile output
{ {
"type": "org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator", "type": "org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator",
"description": "avg_price", "description": "avg_price",
"time": "1124.864392ms", "time_in_nanos": "1124864392",
"breakdown": { "breakdown": {
"reduce": 0, "reduce": 0,
"reduce_count": 0, "reduce_count": 0,
@ -773,7 +777,7 @@ Which yields the following aggregation profile output
From the profile structure we can see our `property_type` terms aggregation, which is internally represented by the From the profile structure we can see our `property_type` terms aggregation, which is internally represented by the
`GlobalOrdinalsStringTermsAggregator` class and the sub-aggregator `avg_price`, which is internally represented by the `AvgAggregator` class. The `type` field displays the class used internally to represent the aggregation. The `description` field displays the name of the aggregation. `GlobalOrdinalsStringTermsAggregator` class and the sub-aggregator `avg_price`, which is internally represented by the `AvgAggregator` class. The `type` field displays the class used internally to represent the aggregation. The `description` field displays the name of the aggregation.
The `"time"` field shows that it took ~4 seconds for the entire aggregation to execute. The recorded time is inclusive The `"time_in_nanos"` field shows that it took ~4 seconds for the entire aggregation to execute. The recorded time is inclusive
of all children. of all children.
The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at The `"breakdown"` field will give detailed stats about how the time was spent, we'll look at

View File

@ -11,7 +11,7 @@ The following is an example of the search request body:
GET /_search GET /_search
{ {
"query" : { "query" : {
"match": { "user": "kimchy" } "match": { "content": "kimchy" }
}, },
"highlight" : { "highlight" : {
"fields" : { "fields" : {

View File

@ -112,22 +112,30 @@ command line with `es.node.name` or in the config file with `node.name`.
Elasticsearch uses http://logging.apache.org/log4j/2.x/[Log4j 2] for Elasticsearch uses http://logging.apache.org/log4j/2.x/[Log4j 2] for
logging. Log4j 2 can be configured using the log4j2.properties logging. Log4j 2 can be configured using the log4j2.properties
file. Elasticsearch exposes a single property `${sys:es.logs}` that can be file. Elasticsearch exposes three properties, `${sys:es.logs.base_path}`,
referenced in the configuration file to determine the location of the log files; `${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}` (if the node name
this will resolve to a prefix for the Elasticsearch log file at runtime. is explicitly set via `node.name`) that can be referenced in the configuration
file to determine the location of the log files. The property
`${sys:es.logs.base_path}` will resolve to the log directory,
`${sys:es.logs.cluster_name}` will resolve to the cluster name (used as the
prefix of log filenames in the default configuration), and
`${sys:es.logs.node_name}` will resolve to the node name (if the node name is
explicitly set).
For example, if your log directory (`path.logs`) is `/var/log/elasticsearch` and For example, if your log directory (`path.logs`) is `/var/log/elasticsearch` and
your cluster is named `production` then `${sys:es.logs}` will resolve to your cluster is named `production` then `${sys:es.logs.base_path}` will resolve
`/var/log/elasticsearch/production`. to `/var/log/elasticsearch` and
`${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log`
will resolve to `/var/log/elasticsearch/production.log`.
[source,properties] [source,properties]
-------------------------------------------------- --------------------------------------------------
appender.rolling.type = RollingFile <1> appender.rolling.type = RollingFile <1>
appender.rolling.name = rolling appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log <2> appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2>
appender.rolling.layout.type = PatternLayout appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log <3> appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log <3>
appender.rolling.policies.type = Policies appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4> appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
appender.rolling.policies.time.interval = 1 <5> appender.rolling.policies.time.interval = 1 <5>
@ -145,6 +153,31 @@ appender.rolling.policies.time.modulate = true <6>
If you append `.gz` or `.zip` to `appender.rolling.filePattern`, then the logs If you append `.gz` or `.zip` to `appender.rolling.filePattern`, then the logs
will be compressed as they are rolled. will be compressed as they are rolled.
If you want to retain log files for a specified period of time, you can use a
rollover strategy with a delete action.
[source,properties]
--------------------------------------------------
appender.rolling.strategy.type = DefaultRolloverStrategy <1>
appender.rolling.strategy.action.type = Delete <2>
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} <3>
appender.rolling.strategy.action.condition.type = IfLastModified <4>
appender.rolling.strategy.action.condition.age = 7D <5>
appender.rolling.strategy.action.PathConditions.type = IfFileName <6>
appender.rolling.strategy.action.PathConditions.glob = ${sys:es.logs.cluster_name}-* <7>
--------------------------------------------------
<1> Configure the `DefaultRolloverStrategy`
<2> Configure the `Delete` action for handling rollovers
<3> The base path to the Elasticsearch logs
<4> The condition to apply when handling rollovers
<5> Retain logs for seven days
<6> Only delete files older than seven days if they match the specified glob
<7> Delete files from the base path matching the glob
`${sys:es.logs.cluster_name}-*`; this is the glob that log files are rolled
to, and it is needed so that only the rolled Elasticsearch logs are deleted,
not the deprecation and slow logs as well
Multiple configuration files can be loaded (in which case they will get merged) Multiple configuration files can be loaded (in which case they will get merged)
as long as they are named `log4j2.properties` and have the Elasticsearch config as long as they are named `log4j2.properties` and have the Elasticsearch config
directory as an ancestor; this is useful for plugins that expose additional directory as an ancestor; this is useful for plugins that expose additional

View File

@ -32,6 +32,7 @@ import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
@ -90,8 +91,8 @@ final class PercolateQuery extends Query implements Accountable {
if (result == docId) { if (result == docId) {
if (twoPhaseIterator.matches()) { if (twoPhaseIterator.matches()) {
if (needsScores) { if (needsScores) {
QueryStore.Leaf percolatorQueries = queryStore.getQueries(leafReaderContext); CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext);
Query query = percolatorQueries.getQuery(docId); Query query = percolatorQueries.apply(docId);
Explanation detail = percolatorIndexSearcher.explain(query, 0); Explanation detail = percolatorIndexSearcher.explain(query, 0);
return Explanation.match(scorer.score(), "PercolateQuery", detail); return Explanation.match(scorer.score(), "PercolateQuery", detail);
} else { } else {
@ -120,7 +121,7 @@ final class PercolateQuery extends Query implements Accountable {
return null; return null;
} }
final QueryStore.Leaf queries = queryStore.getQueries(leafReaderContext); final CheckedFunction<Integer, Query, IOException> queries = queryStore.getQueries(leafReaderContext);
if (needsScores) { if (needsScores) {
return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) { return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {
@ -128,7 +129,7 @@ final class PercolateQuery extends Query implements Accountable {
@Override @Override
boolean matchDocId(int docId) throws IOException { boolean matchDocId(int docId) throws IOException {
Query query = percolatorQueries.getQuery(docId); Query query = percolatorQueries.apply(docId);
if (query != null) { if (query != null) {
TopDocs topDocs = percolatorIndexSearcher.search(query, 1); TopDocs topDocs = percolatorIndexSearcher.search(query, 1);
if (topDocs.totalHits > 0) { if (topDocs.totalHits > 0) {
@ -166,7 +167,7 @@ final class PercolateQuery extends Query implements Accountable {
if (verifiedDocsBits.get(docId)) { if (verifiedDocsBits.get(docId)) {
return true; return true;
} }
Query query = percolatorQueries.getQuery(docId); Query query = percolatorQueries.apply(docId);
return query != null && Lucene.exists(percolatorIndexSearcher, query); return query != null && Lucene.exists(percolatorIndexSearcher, query);
} }
}; };
@ -224,26 +225,18 @@ final class PercolateQuery extends Query implements Accountable {
} }
@FunctionalInterface @FunctionalInterface
public interface QueryStore { interface QueryStore {
CheckedFunction<Integer, Query, IOException> getQueries(LeafReaderContext ctx) throws IOException;
Leaf getQueries(LeafReaderContext ctx) throws IOException;
@FunctionalInterface
interface Leaf {
Query getQuery(int docId) throws IOException;
}
} }
abstract static class BaseScorer extends Scorer { abstract static class BaseScorer extends Scorer {
final Scorer approximation; final Scorer approximation;
final QueryStore.Leaf percolatorQueries; final CheckedFunction<Integer, Query, IOException> percolatorQueries;
final IndexSearcher percolatorIndexSearcher; final IndexSearcher percolatorIndexSearcher;
BaseScorer(Weight weight, Scorer approximation, QueryStore.Leaf percolatorQueries, IndexSearcher percolatorIndexSearcher) { BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries,
IndexSearcher percolatorIndexSearcher) {
super(weight); super(weight);
this.approximation = approximation; this.approximation = approximation;
this.percolatorQueries = percolatorQueries; this.percolatorQueries = percolatorQueries;

View File

@ -86,7 +86,7 @@ public final class PercolatorHighlightSubFetchPhase extends HighlightPhase {
try { try {
LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
int segmentDocId = hit.docId() - ctx.docBase; int segmentDocId = hit.docId() - ctx.docBase;
query = queryStore.getQueries(ctx).getQuery(segmentDocId); query = queryStore.getQueries(ctx).apply(segmentDocId);
} catch (IOException e) { } catch (IOException e) {
throw new RuntimeException(e); throw new RuntimeException(e);
} }

View File

@ -60,6 +60,7 @@ import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -384,13 +385,13 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
@Override @Override
public Scorer scorer(LeafReaderContext context) throws IOException { public Scorer scorer(LeafReaderContext context) throws IOException {
DocIdSetIterator allDocs = DocIdSetIterator.all(context.reader().maxDoc()); DocIdSetIterator allDocs = DocIdSetIterator.all(context.reader().maxDoc());
PercolateQuery.QueryStore.Leaf leaf = queryStore.getQueries(context); CheckedFunction<Integer, Query, IOException> leaf = queryStore.getQueries(context);
FilteredDocIdSetIterator memoryIndexIterator = new FilteredDocIdSetIterator(allDocs) { FilteredDocIdSetIterator memoryIndexIterator = new FilteredDocIdSetIterator(allDocs) {
@Override @Override
protected boolean match(int doc) { protected boolean match(int doc) {
try { try {
Query query = leaf.getQuery(doc); Query query = leaf.apply(doc);
float score = memoryIndex.search(query); float score = memoryIndex.search(query);
if (score != 0f) { if (score != 0f) {
if (needsScores) { if (needsScores) {

View File

@ -32,7 +32,6 @@ import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator; import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.RecvByteBufAllocator; import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.oio.OioEventLoopGroup;
import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpContentDecompressor;
@ -63,7 +62,6 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.http.HttpInfo; import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;
@ -79,7 +77,6 @@ import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler;
import org.elasticsearch.transport.netty4.Netty4Utils; import org.elasticsearch.transport.netty4.Netty4Utils;
import org.elasticsearch.transport.netty4.channel.PrivilegedNioServerSocketChannel; import org.elasticsearch.transport.netty4.channel.PrivilegedNioServerSocketChannel;
import org.elasticsearch.transport.netty4.channel.PrivilegedOioServerSocketChannel;
import java.io.IOException; import java.io.IOException;
import java.net.InetAddress; import java.net.InetAddress;
@ -136,8 +133,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope, Property.Shared); boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope, Property.Shared);
public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE = public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE =
boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope, Property.Shared); boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope, Property.Shared);
-    public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER =
-        boolSetting("http.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope, Property.Shared);
     public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS =
         boolSetting("http.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope, Property.Shared);
@@ -175,8 +170,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
     protected final int workerCount;

-    protected final boolean blockingServer;
-
     protected final boolean pipelining;
     protected final int pipeliningMaxEvents;
@@ -210,6 +203,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
     protected final ByteSizeValue maxCumulationBufferCapacity;
     protected final int maxCompositeBufferComponents;
+    private final Dispatcher dispatcher;
     protected volatile ServerBootstrap serverBootstrap;
@@ -220,17 +214,17 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
     // package private for testing
     Netty4OpenChannelsHandler serverOpenChannels;

-    protected volatile HttpServerAdapter httpServerAdapter;
-
     private final Netty4CorsConfig corsConfig;

     public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool,
-                                     NamedXContentRegistry xContentRegistry) {
+                                     NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) {
         super(settings);
         this.networkService = networkService;
         this.bigArrays = bigArrays;
         this.threadPool = threadPool;
         this.xContentRegistry = xContentRegistry;
+        this.dispatcher = dispatcher;
         ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
         this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
@@ -240,7 +234,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
         this.maxCumulationBufferCapacity = SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings);
         this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
         this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
-        this.blockingServer = SETTING_HTTP_TCP_BLOCKING_SERVER.get(settings);
         this.port = SETTING_HTTP_PORT.get(settings);
         this.bindHosts = SETTING_HTTP_BIND_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
         this.publishHosts = SETTING_HTTP_PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
@@ -286,11 +279,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
         return this.settings;
     }

-    @Override
-    public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {
-        this.httpServerAdapter = httpServerAdapter;
-    }
-
     @Override
     protected void doStart() {
         boolean success = false;
@@ -298,15 +286,10 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
             this.serverOpenChannels = new Netty4OpenChannelsHandler(logger);

             serverBootstrap = new ServerBootstrap();
-            if (blockingServer) {
-                serverBootstrap.group(new OioEventLoopGroup(workerCount, daemonThreadFactory(settings,
-                    HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
-                serverBootstrap.channel(PrivilegedOioServerSocketChannel.class);
-            } else {
-                serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings,
-                    HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
-                serverBootstrap.channel(PrivilegedNioServerSocketChannel.class);
-            }
+            serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings,
+                HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
+            serverBootstrap.channel(PrivilegedNioServerSocketChannel.class);

             serverBootstrap.childHandler(configureServerChannelHandler());
@@ -331,6 +314,9 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
             serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);

             this.boundAddress = createBoundHttpAddress();
+            if (logger.isInfoEnabled()) {
+                logger.info("{}", boundAddress);
+            }
             success = true;
         } finally {
             if (success == false) {
@@ -511,7 +497,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
     }

     protected void dispatchRequest(RestRequest request, RestChannel channel) {
-        httpServerAdapter.dispatchRequest(request, channel, threadPool.getThreadContext());
+        dispatcher.dispatch(request, channel, threadPool.getThreadContext());
     }

     protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
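
Note: the mutable httpServerAdapter setter is gone; the transport now receives its
request dispatcher at construction time. A minimal sketch of the new wiring, in the
same shape the updated tests below use (the fixture variables settings, networkService,
bigArrays, threadPool and xContentRegistry are assumptions standing in for test setup,
not part of this change):

---------------------------------------------------------------------------
// Hedged sketch: a no-op Dispatcher passed at construction time.
HttpServerTransport.Dispatcher noop = (request, channel, context) -> {};
try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
         settings, networkService, bigArrays, threadPool, xContentRegistry, noop)) {
    transport.start();
    // incoming HTTP requests are now routed through noop.dispatch(...)
}
---------------------------------------------------------------------------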
View File
@@ -58,7 +58,6 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
         Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT,
         Netty4HttpServerTransport.SETTING_HTTP_TCP_NO_DELAY,
         Netty4HttpServerTransport.SETTING_HTTP_TCP_KEEP_ALIVE,
-        Netty4HttpServerTransport.SETTING_HTTP_TCP_BLOCKING_SERVER,
         Netty4HttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS,
         Netty4HttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
         Netty4HttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
@@ -93,9 +92,12 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
     @Override
     public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
-                                                                        CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
-                                                                        NamedXContentRegistry xContentRegistry, NetworkService networkService) {
+                                                                        CircuitBreakerService circuitBreakerService,
+                                                                        NamedWriteableRegistry namedWriteableRegistry,
+                                                                        NamedXContentRegistry xContentRegistry,
+                                                                        NetworkService networkService,
+                                                                        HttpServerTransport.Dispatcher dispatcher) {
         return Collections.singletonMap(NETTY_HTTP_TRANSPORT_NAME,
-            () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry));
+            () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher));
     }
 }
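
The NetworkPlugin#getHttpTransports SPI grows a Dispatcher parameter to match.
A hedged sketch of a third-party plugin compiled against the new signature
(MyPlugin and the "my-http-transport" key are illustrative names, not part of
this change):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty4.Netty4HttpServerTransport;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;

public class MyPlugin extends Plugin implements NetworkPlugin {
    @Override
    public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool,
                                                                        BigArrays bigArrays,
                                                                        CircuitBreakerService circuitBreakerService,
                                                                        NamedWriteableRegistry namedWriteableRegistry,
                                                                        NamedXContentRegistry xContentRegistry,
                                                                        NetworkService networkService,
                                                                        HttpServerTransport.Dispatcher dispatcher) {
        // forward the injected dispatcher into the transport instead of
        // setting an adapter on it after construction
        return Collections.singletonMap("my-http-transport",
            () -> new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool,
                xContentRegistry, dispatcher));
    }
}
---------------------------------------------------------------------------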
View File
@@ -32,7 +32,6 @@ import io.netty.channel.ChannelOption;
 import io.netty.channel.FixedRecvByteBufAllocator;
 import io.netty.channel.RecvByteBufAllocator;
 import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.oio.OioEventLoopGroup;
 import io.netty.util.concurrent.Future;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
@@ -65,8 +64,6 @@ import org.elasticsearch.transport.TransportServiceAdapter;
 import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.transport.netty4.channel.PrivilegedNioServerSocketChannel;
 import org.elasticsearch.transport.netty4.channel.PrivilegedNioSocketChannel;
-import org.elasticsearch.transport.netty4.channel.PrivilegedOioServerSocketChannel;
-import org.elasticsearch.transport.netty4.channel.PrivilegedOioSocketChannel;

 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -193,13 +190,8 @@ public class Netty4Transport extends TcpTransport<Channel> {
     private Bootstrap createBootstrap() {
         final Bootstrap bootstrap = new Bootstrap();
-        if (TCP_BLOCKING_CLIENT.get(settings)) {
-            bootstrap.group(new OioEventLoopGroup(1, daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)));
-            bootstrap.channel(PrivilegedOioSocketChannel.class);
-        } else {
-            bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)));
-            bootstrap.channel(PrivilegedNioSocketChannel.class);
-        }
+        bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)));
+        bootstrap.channel(PrivilegedNioSocketChannel.class);

         bootstrap.handler(getClientChannelInitializer());
@@ -282,13 +274,8 @@ public class Netty4Transport extends TcpTransport<Channel> {
         final ServerBootstrap serverBootstrap = new ServerBootstrap();
-        if (TCP_BLOCKING_SERVER.get(settings)) {
-            serverBootstrap.group(new OioEventLoopGroup(workerCount, workerFactory));
-            serverBootstrap.channel(PrivilegedOioServerSocketChannel.class);
-        } else {
-            serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory));
-            serverBootstrap.channel(PrivilegedNioServerSocketChannel.class);
-        }
+        serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory));
+        serverBootstrap.channel(PrivilegedNioServerSocketChannel.class);

         serverBootstrap.childHandler(getServerChannelInitializer(name, settings));
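
With the blocking (OIO) branches removed, both the client and the server bootstrap
are unconditionally NIO. A stripped-down, plain-Netty sketch of the surviving shape;
workerCount, threadFactory and the initializer are illustrative stand-ins for the
settings-derived values above, and Elasticsearch substitutes
PrivilegedNioSocketChannel for NioSocketChannel:

---------------------------------------------------------------------------
import java.util.concurrent.ThreadFactory;

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

// NIO-only client bootstrap: one event loop group, one channel type, no branching.
Bootstrap createNioBootstrap(int workerCount, ThreadFactory threadFactory,
                             ChannelInitializer<SocketChannel> initializer) {
    return new Bootstrap()
        .group(new NioEventLoopGroup(workerCount, threadFactory))
        .channel(NioSocketChannel.class)
        .handler(initializer);
}
---------------------------------------------------------------------------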
View File
@@ -1,50 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.transport.netty4.channel;
-
-import io.netty.channel.socket.oio.OioServerSocketChannel;
-import org.elasticsearch.SpecialPermission;
-
-import java.net.ServerSocket;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.List;
-
-/**
- * Wraps netty calls to {@link ServerSocket#accept()} in {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
- * This is necessary to limit {@link java.net.SocketPermission} to the transport module.
- */
-public class PrivilegedOioServerSocketChannel extends OioServerSocketChannel {
-
-    @Override
-    protected int doReadMessages(List<Object> buf) throws Exception {
-        SecurityManager sm = System.getSecurityManager();
-        if (sm != null) {
-            sm.checkPermission(new SpecialPermission());
-        }
-        try {
-            return AccessController.doPrivileged((PrivilegedExceptionAction<Integer>) () -> super.doReadMessages(buf));
-        } catch (PrivilegedActionException e) {
-            throw (Exception) e.getCause();
-        }
-    }
-}
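
Only the OIO flavors are deleted here; the privileged-wrapping technique that this
class's javadoc describes lives on in the NIO variants. A generic sketch of the
pattern, as used by the surviving Privileged*Channel classes (the Privileged holder
class is illustrative):

---------------------------------------------------------------------------
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

import org.elasticsearch.SpecialPermission;

final class Privileged {
    // Check SpecialPermission, then run the socket operation with the
    // transport module's own privileges so callers do not themselves
    // need java.net.SocketPermission.
    static <T> T run(PrivilegedExceptionAction<T> op) throws Exception {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new SpecialPermission());
        }
        try {
            return AccessController.doPrivileged(op);
        } catch (PrivilegedActionException e) {
            throw (Exception) e.getCause();
        }
    }
}
---------------------------------------------------------------------------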
View File
@@ -1,54 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.transport.netty4.channel;
-
-import io.netty.channel.socket.oio.OioSocketChannel;
-import org.elasticsearch.SpecialPermission;
-
-import java.net.SocketAddress;
-import java.security.AccessController;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-
-/**
- * Wraps netty calls to {@link java.net.Socket#connect(SocketAddress)} in
- * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. This is necessary to limit
- * {@link java.net.SocketPermission} to the transport module.
- */
-public class PrivilegedOioSocketChannel extends OioSocketChannel {
-
-    @Override
-    protected void doConnect(SocketAddress remoteAddress, SocketAddress localAddress) throws Exception {
-        SecurityManager sm = System.getSecurityManager();
-        if (sm != null) {
-            sm.checkPermission(new SpecialPermission());
-        }
-        try {
-            AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
-                super.doConnect(remoteAddress, localAddress);
-                return null;
-            });
-        } catch (PrivilegedActionException e) {
-            throw (Exception) e.getCause();
-        }
-        super.doConnect(remoteAddress, localAddress);
-    }
-}
View File
@@ -188,7 +188,8 @@ public class Netty4HttpChannelTests extends ESTestCase {
     public void testHeadersSet() {
         Settings settings = Settings.builder().build();
         try (Netty4HttpServerTransport httpServerTransport =
-                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry())) {
+                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),
+                     (request, channel, context) -> {})) {
             httpServerTransport.start();
             final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
             httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote");
@@ -218,7 +219,8 @@ public class Netty4HttpChannelTests extends ESTestCase {
     public void testConnectionClose() throws Exception {
         final Settings settings = Settings.builder().build();
         try (Netty4HttpServerTransport httpServerTransport =
-                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry())) {
+                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),
+                     (request, channel, context) -> {})) {
             httpServerTransport.start();
             final FullHttpRequest httpRequest;
             final boolean close = randomBoolean();
@@ -253,7 +255,8 @@ public class Netty4HttpChannelTests extends ESTestCase {
     private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) {
         // construct request and send it over the transport layer
         try (Netty4HttpServerTransport httpServerTransport =
-                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry())) {
+                 new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(),
+                     (request, channel, context) -> {})) {
             httpServerTransport.start();
             final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
             if (originValue != null) {
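
These tests pass a no-op lambda because they only exercise the channel. When a test
needs to assert on what was routed, a capturing dispatcher is just as easy; a hedged
sketch of such a test helper (illustrative, not part of this change):

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicReference;

import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.rest.RestRequest;

// Records the last dispatched request instead of dropping it.
AtomicReference<RestRequest> lastRequest = new AtomicReference<>();
HttpServerTransport.Dispatcher recording =
    (request, channel, context) -> lastRequest.set(request);
---------------------------------------------------------------------------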
View File
@@ -160,7 +160,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
                 Netty4HttpServerPipeliningTests.this.networkService,
                 Netty4HttpServerPipeliningTests.this.bigArrays,
                 Netty4HttpServerPipeliningTests.this.threadPool,
-                xContentRegistry());
+                xContentRegistry(), (request, channel, context) -> {});
         }

         @Override
View File
@@ -121,9 +121,8 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
      */
     public void testExpectContinueHeader() throws Exception {
         try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool,
-                xContentRegistry())) {
-            transport.httpServerAdapter((request, channel, context) ->
-                channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))));
+                xContentRegistry(), (request, channel, context) ->
+                    channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))))) {
             transport.start();
             TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
@@ -145,12 +144,12 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
     public void testBindUnavailableAddress() {
         try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool,
-                xContentRegistry())) {
+                xContentRegistry(), (request, channel, context) -> {})) {
             transport.start();
             TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
             Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build();
             try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool,
-                    xContentRegistry())) {
+                    xContentRegistry(), (request, channel, context) -> {})) {
                 BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start());
                 assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage());
             }
View File
@@ -35,20 +35,11 @@ import java.io.IOException;
 public class PolishStemTokenFilterFactory extends AbstractTokenFilterFactory {

-    private final StempelStemmer stemmer;
-
     public PolishStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
-        Trie tire;
-        try {
-            tire = StempelStemmer.load(PolishAnalyzer.class.getResourceAsStream(PolishAnalyzer.DEFAULT_STEMMER_FILE));
-        } catch (IOException ex) {
-            throw new RuntimeException("Unable to load default stemming tables", ex);
-        }
-        stemmer = new StempelStemmer(tire);
     }

     @Override public TokenStream create(TokenStream tokenStream) {
-        return new StempelFilter(tokenStream, stemmer);
+        return new StempelFilter(tokenStream, new StempelStemmer(PolishAnalyzer.getDefaultTable()));
     }
 }
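
The factory no longer eagerly loads and caches a single StempelStemmer; each token
stream gets a fresh stemmer over the analyzer's shared default trie. The apparent
motivation (inferred, not stated in the change) is that a StempelStemmer carries
per-call working state, so a per-stream instance is the safe choice. A minimal usage
sketch against the Lucene stempel API:

---------------------------------------------------------------------------
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.pl.PolishAnalyzer;
import org.apache.lucene.analysis.stempel.StempelFilter;
import org.apache.lucene.analysis.stempel.StempelStemmer;

// One stemmer per stream over the shared, immutable default stemming trie.
TokenStream polishStem(TokenStream in) {
    return new StempelFilter(in, new StempelStemmer(PolishAnalyzer.getDefaultTable()));
}
---------------------------------------------------------------------------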
Some files were not shown because too many files have changed in this diff